2024-12-10 00:26:41,957 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-10 00:26:41,973 main DEBUG Took 0.013688 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 00:26:41,974 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 00:26:41,974 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 00:26:41,976 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 00:26:41,978 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:41,988 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 00:26:42,005 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,008 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,009 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,010 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,011 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,012 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,013 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,015 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-10 00:26:42,016 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,017 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,018 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,018 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,019 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,019 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,020 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 00:26:42,021 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,021 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 00:26:42,024 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 00:26:42,025 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 00:26:42,028 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 00:26:42,028 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-10 00:26:42,030 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 00:26:42,030 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 00:26:42,042 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 00:26:42,045 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 00:26:42,047 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 00:26:42,047 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 00:26:42,048 main DEBUG createAppenders(={Console}) 2024-12-10 00:26:42,049 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized 2024-12-10 00:26:42,049 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-10 00:26:42,049 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK. 2024-12-10 00:26:42,050 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 00:26:42,050 main DEBUG OutputStream closed 2024-12-10 00:26:42,051 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 00:26:42,051 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 00:26:42,051 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK 2024-12-10 00:26:42,135 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 00:26:42,137 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 00:26:42,139 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 00:26:42,139 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 00:26:42,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 00:26:42,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 00:26:42,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 00:26:42,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 00:26:42,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 00:26:42,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 00:26:42,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 00:26:42,143 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 00:26:42,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 00:26:42,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 00:26:42,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 00:26:42,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 00:26:42,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 00:26:42,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 00:26:42,147 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 00:26:42,147 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null 2024-12-10 00:26:42,147 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 00:26:42,148 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK. 2024-12-10T00:26:42,348 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a 2024-12-10 00:26:42,351 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 00:26:42,351 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
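
For readability, the Log4j 2 setup assembled in the DEBUG lines above corresponds to a properties file along the following lines. The logger names, levels, appender target/size and layout pattern are taken directly from the builder output above; the property key names and the abbreviated tail of the logger list are an assumed reconstruction in standard Log4j 2 properties syntax, not the verbatim contents of the log4j2.properties inside the hbase-logging tests jar.

    # Sketch reconstructed from the configuration DEBUG output above (assumed key names)
    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # Root logger: levelAndRefs="INFO,Console" in the builder output
    rootLogger = INFO,Console

    # Per-package overrides listed in createLoggers() above (first few shown)
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false
    # ... remaining overrides from createLoggers(): MBeans=ERROR, TestJul2Slf4j=DEBUG,
    #     MetricsSinkAdapter=WARN, MetricsSystemImpl=ERROR, FailedServers=DEBUG,
    #     MetricsConfig=WARN, ScheduledChore=INFO, RSRpcServices=DEBUG,
    #     org.apache.hbase.thirdparty.io.netty.channel=DEBUG
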
2024-12-10T00:26:42,359 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-12-10T00:26:42,365 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-12-10T00:26:42,388 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-10T00:26:42,429 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T00:26:42,429 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T00:26:42,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T00:26:42,455 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35, deleteOnExit=true 2024-12-10T00:26:42,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-10T00:26:42,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/test.cache.data in system properties and HBase conf 2024-12-10T00:26:42,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T00:26:42,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir in system properties and HBase conf 2024-12-10T00:26:42,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T00:26:42,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T00:26:42,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-10T00:26:42,539 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-10T00:26:42,626 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T00:26:42,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T00:26:42,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T00:26:42,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T00:26:42,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T00:26:42,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T00:26:42,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T00:26:42,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T00:26:42,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T00:26:42,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T00:26:42,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/nfs.dump.dir in system properties and HBase conf 2024-12-10T00:26:42,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/java.io.tmpdir in system properties and HBase conf 2024-12-10T00:26:42,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T00:26:42,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T00:26:42,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T00:26:43,715 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T00:26:43,782 INFO [Time-limited test {}] log.Log(170): Logging initialized @2554ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T00:26:43,851 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T00:26:43,923 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T00:26:43,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T00:26:43,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T00:26:43,947 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T00:26:43,964 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T00:26:43,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,AVAILABLE} 2024-12-10T00:26:43,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T00:26:44,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/java.io.tmpdir/jetty-localhost-39133-hadoop-hdfs-3_4_1-tests_jar-_-any-5503894266804161570/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T00:26:44,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:39133} 2024-12-10T00:26:44,169 INFO [Time-limited test {}] server.Server(415): Started @2940ms 2024-12-10T00:26:44,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T00:26:44,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T00:26:44,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T00:26:44,903 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T00:26:44,903 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T00:26:44,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f76f489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,AVAILABLE} 2024-12-10T00:26:44,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@433df981{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T00:26:45,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36632d60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/java.io.tmpdir/jetty-localhost-37799-hadoop-hdfs-3_4_1-tests_jar-_-any-9170284882309857414/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T00:26:45,035 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@751d2fa4{HTTP/1.1, (http/1.1)}{localhost:37799} 2024-12-10T00:26:45,035 INFO [Time-limited test {}] server.Server(415): Started @3807ms 2024-12-10T00:26:45,108 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T00:26:45,286 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T00:26:45,295 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T00:26:45,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T00:26:45,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T00:26:45,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T00:26:45,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b5fc47c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,AVAILABLE} 2024-12-10T00:26:45,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ebbf344{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T00:26:45,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4546bb60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/java.io.tmpdir/jetty-localhost-35995-hadoop-hdfs-3_4_1-tests_jar-_-any-11717701247632910276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T00:26:45,439 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c7d32f8{HTTP/1.1, (http/1.1)}{localhost:35995} 2024-12-10T00:26:45,439 INFO [Time-limited test {}] server.Server(415): Started @4210ms 2024-12-10T00:26:45,442 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T00:26:45,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T00:26:45,513 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T00:26:45,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T00:26:45,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T00:26:45,516 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T00:26:45,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7da22a2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,AVAILABLE} 2024-12-10T00:26:45,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f079a76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T00:26:45,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43206bef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/java.io.tmpdir/jetty-localhost-44535-hadoop-hdfs-3_4_1-tests_jar-_-any-10001189146709507705/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T00:26:45,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@228ffa29{HTTP/1.1, (http/1.1)}{localhost:44535} 2024-12-10T00:26:45,623 INFO [Time-limited test {}] server.Server(415): Started @4394ms 2024-12-10T00:26:45,625 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-10T00:26:47,366 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data2/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,366 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data4/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,366 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data1/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,366 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data3/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,405 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T00:26:47,405 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T00:26:47,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fd0a55cd3b3d3bb with lease ID 0x7bb5d369569bea91: Processing first storage report for DS-4e56f297-94df-44d4-8403-f4675f27f5bf from datanode DatanodeRegistration(127.0.0.1:37237, datanodeUuid=4313b812-b7fa-42ae-8dc6-44033a19389c, infoPort=39943, infoSecurePort=0, ipcPort=33051, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fd0a55cd3b3d3bb with lease ID 0x7bb5d369569bea91: from storage DS-4e56f297-94df-44d4-8403-f4675f27f5bf node DatanodeRegistration(127.0.0.1:37237, datanodeUuid=4313b812-b7fa-42ae-8dc6-44033a19389c, infoPort=39943, infoSecurePort=0, ipcPort=33051, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x807e6e9457f5d3a8 with lease ID 0x7bb5d369569bea92: Processing first storage report for DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=fff3098e-8200-4489-9cf1-27afa7f9c4a0, infoPort=36533, infoSecurePort=0, ipcPort=43885, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x807e6e9457f5d3a8 with lease ID 0x7bb5d369569bea92: from storage DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=fff3098e-8200-4489-9cf1-27afa7f9c4a0, infoPort=36533, infoSecurePort=0, ipcPort=43885, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fd0a55cd3b3d3bb with lease ID 0x7bb5d369569bea91: Processing first storage report for DS-4ee6f649-01af-479a-a657-1f8578a3faf2 from datanode DatanodeRegistration(127.0.0.1:37237, datanodeUuid=4313b812-b7fa-42ae-8dc6-44033a19389c, infoPort=39943, infoSecurePort=0, ipcPort=33051, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fd0a55cd3b3d3bb with lease ID 0x7bb5d369569bea91: from storage DS-4ee6f649-01af-479a-a657-1f8578a3faf2 node DatanodeRegistration(127.0.0.1:37237, datanodeUuid=4313b812-b7fa-42ae-8dc6-44033a19389c, infoPort=39943, infoSecurePort=0, ipcPort=33051, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x807e6e9457f5d3a8 with lease ID 0x7bb5d369569bea92: Processing first storage report for DS-97dd1040-b276-45c2-8bc9-27348ce4ec99 from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=fff3098e-8200-4489-9cf1-27afa7f9c4a0, infoPort=36533, infoSecurePort=0, ipcPort=43885, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,454 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x807e6e9457f5d3a8 with lease ID 0x7bb5d369569bea92: from storage DS-97dd1040-b276-45c2-8bc9-27348ce4ec99 node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=fff3098e-8200-4489-9cf1-27afa7f9c4a0, infoPort=36533, infoSecurePort=0, ipcPort=43885, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,616 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data6/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,616 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data5/current/BP-313768122-172.17.0.2-1733790403195/current, will proceed with Du for space computation calculation, 2024-12-10T00:26:47,633 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T00:26:47,638 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe62ae2fa9cab5adf with lease ID 0x7bb5d369569bea93: Processing first storage report for DS-1c8c0855-c991-44b1-833b-e2ac9409ecea from datanode DatanodeRegistration(127.0.0.1:34693, datanodeUuid=de0f791f-5bfa-4730-a30a-2d4362809fdb, infoPort=41935, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,638 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe62ae2fa9cab5adf with lease ID 0x7bb5d369569bea93: from storage DS-1c8c0855-c991-44b1-833b-e2ac9409ecea node DatanodeRegistration(127.0.0.1:34693, datanodeUuid=de0f791f-5bfa-4730-a30a-2d4362809fdb, infoPort=41935, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,638 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe62ae2fa9cab5adf with lease ID 0x7bb5d369569bea93: Processing first storage report for DS-d27eef1a-686a-4ef2-9d70-9b2bb34f2c15 from datanode DatanodeRegistration(127.0.0.1:34693, datanodeUuid=de0f791f-5bfa-4730-a30a-2d4362809fdb, infoPort=41935, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196) 2024-12-10T00:26:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe62ae2fa9cab5adf with lease ID 0x7bb5d369569bea93: from storage DS-d27eef1a-686a-4ef2-9d70-9b2bb34f2c15 node DatanodeRegistration(127.0.0.1:34693, datanodeUuid=de0f791f-5bfa-4730-a30a-2d4362809fdb, infoPort=41935, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=510562633;c=1733790403196), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T00:26:47,727 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a 2024-12-10T00:26:47,817 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/zookeeper_0, clientPort=51780, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T00:26:47,837 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51780 2024-12-10T00:26:47,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:47,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:48,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741825_1001 (size=7) 2024-12-10T00:26:48,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741825_1001 (size=7) 2024-12-10T00:26:48,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741825_1001 (size=7) 2024-12-10T00:26:48,454 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 with version=8 2024-12-10T00:26:48,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/hbase-staging 2024-12-10T00:26:48,712 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a67c4886b4f7:0 server-side Connection retries=45 2024-12-10T00:26:48,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:48,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:48,726 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T00:26:48,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=3 2024-12-10T00:26:48,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T00:26:48,883 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-10T00:26:48,940 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T00:26:48,948 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T00:26:48,952 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T00:26:48,978 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 59882 (auto-detected) 2024-12-10T00:26:48,979 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T00:26:48,998 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41433 2024-12-10T00:26:49,019 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41433 connecting to ZooKeeper ensemble=127.0.0.1:51780 2024-12-10T00:26:49,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:414330x0, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T00:26:49,146 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41433-0x1000d1e23400000 connected 2024-12-10T00:26:49,621 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:49,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:49,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:26:49,642 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507, hbase.cluster.distributed=false 2024-12-10T00:26:49,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T00:26:49,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41433 2024-12-10T00:26:49,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41433 2024-12-10T00:26:49,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41433 2024-12-10T00:26:49,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41433 2024-12-10T00:26:49,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41433 2024-12-10T00:26:49,757 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a67c4886b4f7:0 server-side Connection retries=45 2024-12-10T00:26:49,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T00:26:49,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T00:26:49,761 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T00:26:49,763 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T00:26:49,763 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39473 2024-12-10T00:26:49,765 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39473 connecting to ZooKeeper ensemble=127.0.0.1:51780 2024-12-10T00:26:49,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:49,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:49,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394730x0, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T00:26:49,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:394730x0, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:26:49,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39473-0x1000d1e23400001 connected 2024-12-10T00:26:49,872 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T00:26:49,881 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T00:26:49,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39473-0x1000d1e23400001, 
quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T00:26:49,889 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T00:26:49,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39473 2024-12-10T00:26:49,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39473 2024-12-10T00:26:49,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39473 2024-12-10T00:26:49,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39473 2024-12-10T00:26:49,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39473 2024-12-10T00:26:49,907 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a67c4886b4f7:0 server-side Connection retries=45 2024-12-10T00:26:49,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,907 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T00:26:49,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:49,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T00:26:49,908 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T00:26:49,908 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T00:26:49,909 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36039 2024-12-10T00:26:49,910 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36039 connecting to ZooKeeper ensemble=127.0.0.1:51780 2024-12-10T00:26:49,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:49,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:50,002 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360390x0, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T00:26:50,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360390x0, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:26:50,003 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36039-0x1000d1e23400002 connected 2024-12-10T00:26:50,003 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T00:26:50,005 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T00:26:50,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T00:26:50,009 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T00:26:50,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-10T00:26:50,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36039 2024-12-10T00:26:50,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36039 2024-12-10T00:26:50,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-10T00:26:50,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-10T00:26:50,027 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a67c4886b4f7:0 server-side Connection retries=45 2024-12-10T00:26:50,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:50,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:50,028 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T00:26:50,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T00:26:50,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T00:26:50,028 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T00:26:50,028 
INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T00:26:50,029 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42867 2024-12-10T00:26:50,030 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42867 connecting to ZooKeeper ensemble=127.0.0.1:51780 2024-12-10T00:26:50,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:50,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T00:26:50,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428670x0, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T00:26:50,065 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42867-0x1000d1e23400003 connected 2024-12-10T00:26:50,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:26:50,066 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T00:26:50,067 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T00:26:50,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T00:26:50,073 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T00:26:50,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-10T00:26:50,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42867 2024-12-10T00:26:50,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42867 2024-12-10T00:26:50,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-10T00:26:50,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-10T00:26:50,097 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a67c4886b4f7:41433 2024-12-10T00:26:50,098 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,154 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:50,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,200 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T00:26:50,202 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a67c4886b4f7,41433,1733790408551 from backup master directory 2024-12-10T00:26:50,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/backup-masters/a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:50,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T00:26:50,212 WARN [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T00:26:50,213 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:50,215 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T00:26:50,216 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T00:26:50,268 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/hbase.id] with ID: d36e6add-ccf9-4630-ab24-60fe546afd9a 2024-12-10T00:26:50,268 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/.tmp/hbase.id 2024-12-10T00:26:50,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741826_1002 (size=42) 2024-12-10T00:26:50,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741826_1002 (size=42) 2024-12-10T00:26:50,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741826_1002 (size=42) 2024-12-10T00:26:50,283 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/.tmp/hbase.id]:[hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/hbase.id] 2024-12-10T00:26:50,328 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
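Editor's note: the FSUtils lines just above show how the master publishes the cluster ID file: it is written to a .tmp location first and then moved onto its final path, so a reader sees either the complete file or nothing. Below is a minimal sketch of that write-then-rename pattern with the plain Hadoop FileSystem API, assuming a standalone main and hypothetical local paths; only the UUID string is taken from the log.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
    public static void main(String[] args) throws Exception {
        // With fs.defaultFS pointing at a NameNode (localhost:34093 in the log) this talks to
        // HDFS; with the default configuration it falls back to the local filesystem.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical stand-ins for .../.tmp/hbase.id and .../hbase.id from the log.
        Path tmp = new Path("/tmp/hbase-id-demo/.tmp/hbase.id");
        Path dst = new Path("/tmp/hbase-id-demo/hbase.id");

        // Write the id into a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("d36e6add-ccf9-4630-ab24-60fe546afd9a".getBytes(StandardCharsets.UTF_8));
        }
        // ... then publish it with a rename, so readers never observe a half-written file.
        if (!fs.rename(tmp, dst)) {
            throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id published at " + dst);
    }
}

The same write-under-.tmp-then-rename trick appears throughout HBase's filesystem layout code because a rename is a single atomic NameNode operation.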
2024-12-10T00:26:50,333 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T00:26:50,353 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-10T00:26:50,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741827_1003 (size=196) 2024-12-10T00:26:50,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741827_1003 (size=196) 2024-12-10T00:26:50,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741827_1003 (size=196) 2024-12-10T00:26:50,406 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T00:26:50,408 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 
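Editor's note: the 'master:store' descriptor dumped above is assembled internally by MasterRegion, but the same attributes can be expressed through the public client API. The sketch below covers only the 'info' family and assumes the current ColumnFamilyDescriptorBuilder/TableDescriptorBuilder interfaces; the 'example:store' table name is a placeholder, since the real region lives in the reserved master namespace and is never created by a client.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family logged above: 3 versions, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();

        // Placeholder name; building the descriptor does not create anything on the cluster.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example", "store"))
                .setColumnFamily(info)
                .build();
        System.out.println(td);
    }
}

The smaller 8 KB block size and the IN_MEMORY flag on 'info' (versus 64 KB, not in-memory, for 'proc', 'rs' and 'state') are the kind of settings that favor cheap point reads on a small, hot store.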
2024-12-10T00:26:50,414 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:50,439 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:50,439 WARN [IPC Server handler 0 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:50,440 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:50,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741828_1004 (size=1189) 2024-12-10T00:26:50,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741828_1004 (size=1189) 2024-12-10T00:26:50,466 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store 2024-12-10T00:26:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741829_1005 (size=34) 2024-12-10T00:26:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741829_1005 (size=34) 2024-12-10T00:26:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741829_1005 (size=34) 2024-12-10T00:26:50,497 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-10T00:26:50,500 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:50,501 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T00:26:50,501 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:26:50,502 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:26:50,503 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T00:26:50,503 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:26:50,503 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
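Editor's note: two of the knobs surfaced in this stretch can be set directly in configuration. The StoreHotnessProtector line quotes its own key, hbase.region.store.parallel.put.limit; the WAL provider seen above (AsyncFSWALProvider) is, as far as I know, selected with hbase.wal.provider, so treat that key and the sample values below as assumptions rather than recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WriteHotnessConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted verbatim in the StoreHotnessProtector log line; any value > 0 enables
        // the protector. 10 is an arbitrary illustration, not a tuned value.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        // Assumed key for choosing the WAL implementation; "asyncfs" matches the
        // AsyncFSWALProvider reported by WALFactory above.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println(conf.get("hbase.region.store.parallel.put.limit"));
    }
}

In the test run above both settings are left at their defaults, which is why the protector reports itself disabled.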
2024-12-10T00:26:50,504 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733790410501Disabling compacts and flushes for region at 1733790410501Disabling writes for close at 1733790410503 (+2 ms)Writing region close event to WAL at 1733790410503Closed at 1733790410503 2024-12-10T00:26:50,506 WARN [master/a67c4886b4f7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/.initializing 2024-12-10T00:26:50,506 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/WALs/a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:50,513 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:50,527 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a67c4886b4f7%2C41433%2C1733790408551, suffix=, logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/WALs/a67c4886b4f7,41433,1733790408551, archiveDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/oldWALs, maxLogs=10 2024-12-10T00:26:50,554 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/WALs/a67c4886b4f7,41433,1733790408551/a67c4886b4f7%2C41433%2C1733790408551.1733790410531, exclude list is [], retry=0 2024-12-10T00:26:50,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:26:50,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:50,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:50,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:50,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-10T00:26:50,622 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/WALs/a67c4886b4f7,41433,1733790408551/a67c4886b4f7%2C41433%2C1733790408551.1733790410531 2024-12-10T00:26:50,623 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:50,624 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:50,624 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:50,628 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,629 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T00:26:50,689 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:50,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:50,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T00:26:50,696 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:50,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:50,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T00:26:50,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:50,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:50,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T00:26:50,706 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:50,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:50,707 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,710 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,711 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,716 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,717 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,720 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T00:26:50,724 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T00:26:50,728 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:50,729 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68727890, jitterRate=0.024125367403030396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T00:26:50,735 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733790410642Initializing all the Stores at 1733790410644 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790410645 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790410645Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790410645Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790410645Cleaning up temporary data from old regions at 1733790410717 (+72 ms)Region opened successfully at 1733790410735 (+18 ms) 2024-12-10T00:26:50,736 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T00:26:50,766 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71446235, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a67c4886b4f7/172.17.0.2:0 2024-12-10T00:26:50,792 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T00:26:50,801 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T00:26:50,802 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T00:26:50,804 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T00:26:50,805 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T00:26:50,809 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-10T00:26:50,809 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T00:26:50,831 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T00:26:50,839 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T00:26:50,893 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T00:26:50,896 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T00:26:50,897 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T00:26:50,906 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T00:26:50,908 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T00:26:50,912 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T00:26:50,924 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T00:26:50,927 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T00:26:50,938 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T00:26:50,958 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T00:26:50,969 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:50,985 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a67c4886b4f7,41433,1733790408551, sessionid=0x1000d1e23400000, setting cluster-up flag (Was=false) 2024-12-10T00:26:51,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
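Editor's note: the ZooKeeper traffic in this section is the standard watch-then-notify cycle. Earlier the region servers set watches on znodes that did not exist yet (/hbase/running, /hbase/master), and here every client receives NodeCreated and NodeChildrenChanged events once the active master creates them; the "Unable to get data of znode ... (not necessarily an error)" lines are the same probe-and-watch idea applied to optional switch nodes. Below is a minimal sketch of an existence watch with the plain ZooKeeper client rather than the ZKWatcher/ZKUtil wrappers used in the log; the quorum address and path come from the log, while the timeouts and the printing watcher are arbitrary.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ExistenceWatchSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address as reported in the log; the 15 s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51780", 15_000, event -> { });

        // exists() registers the watch even when the znode is absent, which is exactly what the
        // "Set watcher on znode that does not yet exist, /hbase/running" lines describe: the
        // client asks to be told when the active master eventually creates the node.
        zk.exists("/hbase/running", (WatchedEvent event) ->
                System.out.println("event=" + event.getType() + " path=" + event.getPath()));

        Thread.sleep(5_000); // keep the session open long enough to observe a notification
        zk.close();
    }
}

Watches are one-shot in ZooKeeper, so long-lived code such as ZKWatcher re-registers them after each notification; the repetition in the log, though, is simply each of the four sessions (one master, three region servers) registering its own watches.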
2024-12-10T00:26:51,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,044 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T00:26:51,046 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:51,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,106 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T00:26:51,108 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:51,114 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T00:26:51,185 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(746): ClusterId : d36e6add-ccf9-4630-ab24-60fe546afd9a 2024-12-10T00:26:51,185 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(746): ClusterId : d36e6add-ccf9-4630-ab24-60fe546afd9a 2024-12-10T00:26:51,185 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(746): ClusterId : d36e6add-ccf9-4630-ab24-60fe546afd9a 2024-12-10T00:26:51,188 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T00:26:51,188 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T00:26:51,188 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T00:26:51,208 INFO [AsyncFSWAL-0-hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData-prefix:a67c4886b4f7,41433,1733790408551 {}] compress.Compression(560): Loaded codec 
org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-12-10T00:26:51,213 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T00:26:51,213 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T00:26:51,213 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T00:26:51,213 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T00:26:51,213 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T00:26:51,213 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T00:26:51,222 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T00:26:51,223 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T00:26:51,223 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T00:26:51,224 DEBUG [RS:1;a67c4886b4f7:36039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22355c6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a67c4886b4f7/172.17.0.2:0 2024-12-10T00:26:51,224 DEBUG [RS:0;a67c4886b4f7:39473 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57eb8109, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a67c4886b4f7/172.17.0.2:0 2024-12-10T00:26:51,224 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T00:26:51,225 DEBUG [RS:2;a67c4886b4f7:42867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4491a959, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a67c4886b4f7/172.17.0.2:0 2024-12-10T00:26:51,232 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T00:26:51,239 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
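Editor's note: the StochasticLoadBalancer line closing this stretch echoes its effective settings (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, runMaxSteps=false), and BaseLoadBalancer reports slop=0.2. The keys in the sketch below are the ones I believe map to those values; they are not quoted in the log itself, so treat the key names and sample values as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys behind the StochasticLoadBalancer settings echoed in the log.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        // Assumed key for the "slop=0.2" value from BaseLoadBalancer.
        conf.setFloat("hbase.regions.slop", 0.2f);
        System.out.println(conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
    }
}

With runMaxSteps=false the balancer is, roughly speaking, bounded by maxRunningTime rather than being forced to exhaust maxSteps on every run.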
2024-12-10T00:26:51,243 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a67c4886b4f7:42867 2024-12-10T00:26:51,243 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a67c4886b4f7:39473 2024-12-10T00:26:51,243 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a67c4886b4f7:36039 2024-12-10T00:26:51,247 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T00:26:51,247 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T00:26:51,247 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T00:26:51,247 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T00:26:51,247 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T00:26:51,247 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T00:26:51,247 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T00:26:51,247 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T00:26:51,247 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T00:26:51,245 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a67c4886b4f7,41433,1733790408551 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T00:26:51,250 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(2659): reportForDuty to master=a67c4886b4f7,41433,1733790408551 with port=39473, startcode=1733790409727 2024-12-10T00:26:51,250 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(2659): reportForDuty to master=a67c4886b4f7,41433,1733790408551 with port=42867, startcode=1733790410027 2024-12-10T00:26:51,250 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(2659): reportForDuty to master=a67c4886b4f7,41433,1733790408551 with port=36039, startcode=1733790409906 2024-12-10T00:26:51,252 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a67c4886b4f7:0, corePoolSize=5, maxPoolSize=5 2024-12-10T00:26:51,253 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a67c4886b4f7:0, corePoolSize=5, maxPoolSize=5 2024-12-10T00:26:51,253 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a67c4886b4f7:0, corePoolSize=5, maxPoolSize=5 2024-12-10T00:26:51,253 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a67c4886b4f7:0, 
corePoolSize=5, maxPoolSize=5 2024-12-10T00:26:51,253 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a67c4886b4f7:0, corePoolSize=10, maxPoolSize=10 2024-12-10T00:26:51,253 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,254 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a67c4886b4f7:0, corePoolSize=2, maxPoolSize=2 2024-12-10T00:26:51,254 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,257 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733790441257 2024-12-10T00:26:51,259 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T00:26:51,259 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T00:26:51,260 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T00:26:51,261 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T00:26:51,262 DEBUG [RS:0;a67c4886b4f7:39473 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T00:26:51,262 DEBUG [RS:2;a67c4886b4f7:42867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T00:26:51,262 DEBUG [RS:1;a67c4886b4f7:36039 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T00:26:51,265 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T00:26:51,265 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T00:26:51,265 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T00:26:51,265 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T00:26:51,266 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:51,266 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T00:26:51,274 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,277 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T00:26:51,279 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T00:26:51,279 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T00:26:51,282 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T00:26:51,282 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T00:26:51,284 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.large.0-1733790411283,5,FailOnTimeoutGroup] 2024-12-10T00:26:51,293 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.small.0-1733790411284,5,FailOnTimeoutGroup] 2024-12-10T00:26:51,293 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,293 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T00:26:51,314 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-10T00:26:51,314 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741831_1007 (size=1321) 2024-12-10T00:26:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741831_1007 (size=1321) 2024-12-10T00:26:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741831_1007 (size=1321) 2024-12-10T00:26:51,319 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T00:26:51,319 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 2024-12-10T00:26:51,325 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:51,325 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], 
policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:51,325 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:51,329 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37911, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T00:26:51,329 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57627, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T00:26:51,329 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35373, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T00:26:51,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741832_1008 (size=32) 2024-12-10T00:26:51,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741832_1008 (size=32) 2024-12-10T00:26:51,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:51,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T00:26:51,338 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(517): Registering regionserver=a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T00:26:51,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:51,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:51,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T00:26:51,344 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T00:26:51,344 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:51,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:51,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T00:26:51,349 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T00:26:51,349 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:51,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:51,351 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T00:26:51,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,353 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(517): Registering regionserver=a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T00:26:51,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:51,356 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 2024-12-10T00:26:51,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:51,356 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34093 2024-12-10T00:26:51,356 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T00:26:51,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,358 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 2024-12-10T00:26:51,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41433 {}] master.ServerManager(517): Registering regionserver=a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,358 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34093 2024-12-10T00:26:51,358 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T00:26:51,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T00:26:51,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740 
2024-12-10T00:26:51,362 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740 2024-12-10T00:26:51,364 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 2024-12-10T00:26:51,364 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34093 2024-12-10T00:26:51,364 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T00:26:51,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T00:26:51,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T00:26:51,369 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T00:26:51,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T00:26:51,377 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:51,378 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75277532, jitterRate=0.12172263860702515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T00:26:51,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733790411335Initializing all the Stores at 1733790411336 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790411336Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790411336Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790411336Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790411336Cleaning up temporary data from old regions at 1733790411365 (+29 ms)Region opened successfully at 1733790411380 (+15 ms) 2024-12-10T00:26:51,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T00:26:51,381 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T00:26:51,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T00:26:51,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T00:26:51,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T00:26:51,383 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T00:26:51,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733790411381Disabling compacts and flushes for region at 1733790411381Disabling writes for close at 1733790411381Writing region close event to WAL at 1733790411382 (+1 ms)Closed at 1733790411383 (+1 ms) 2024-12-10T00:26:51,386 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T00:26:51,386 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T00:26:51,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T00:26:51,399 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T00:26:51,403 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T00:26:51,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T00:26:51,430 DEBUG [RS:1;a67c4886b4f7:36039 {}] zookeeper.ZKUtil(111): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,430 DEBUG [RS:2;a67c4886b4f7:42867 {}] zookeeper.ZKUtil(111): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,430 WARN [RS:1;a67c4886b4f7:36039 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T00:26:51,430 WARN [RS:2;a67c4886b4f7:42867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T00:26:51,430 INFO [RS:1;a67c4886b4f7:36039 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:51,430 INFO [RS:2;a67c4886b4f7:42867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:51,431 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,431 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,431 DEBUG [RS:0;a67c4886b4f7:39473 {}] zookeeper.ZKUtil(111): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,431 WARN [RS:0;a67c4886b4f7:39473 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T00:26:51,432 INFO [RS:0;a67c4886b4f7:39473 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:51,432 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,433 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a67c4886b4f7,39473,1733790409727] 2024-12-10T00:26:51,433 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a67c4886b4f7,42867,1733790410027] 2024-12-10T00:26:51,433 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a67c4886b4f7,36039,1733790409906] 2024-12-10T00:26:51,462 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T00:26:51,464 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T00:26:51,464 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T00:26:51,478 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T00:26:51,481 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T00:26:51,485 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T00:26:51,489 INFO [RS:0;a67c4886b4f7:39473 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T00:26:51,489 INFO [RS:1;a67c4886b4f7:36039 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T00:26:51,489 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,489 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,490 INFO [RS:2;a67c4886b4f7:42867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T00:26:51,490 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,494 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T00:26:51,494 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T00:26:51,496 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T00:26:51,500 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T00:26:51,500 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T00:26:51,503 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,503 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,503 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T00:26:51,503 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,503 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0, corePoolSize=2, maxPoolSize=2 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,504 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0, corePoolSize=2, maxPoolSize=2 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,505 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,505 DEBUG [RS:0;a67c4886b4f7:39473 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,506 DEBUG [RS:2;a67c4886b4f7:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,506 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0, corePoolSize=2, maxPoolSize=2 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 
{}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a67c4886b4f7:0, corePoolSize=1, maxPoolSize=1 2024-12-10T00:26:51,507 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,508 DEBUG [RS:1;a67c4886b4f7:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a67c4886b4f7:0, corePoolSize=3, maxPoolSize=3 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,513 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,39473,1733790409727-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,519 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,42867,1733790410027-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,537 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,36039,1733790409906-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T00:26:51,539 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T00:26:51,542 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,39473,1733790409727-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,542 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,542 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.Replication(171): a67c4886b4f7,39473,1733790409727 started 2024-12-10T00:26:51,549 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T00:26:51,549 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,42867,1733790410027-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,549 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,550 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.Replication(171): a67c4886b4f7,42867,1733790410027 started 2024-12-10T00:26:51,554 WARN [a67c4886b4f7:41433 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-10T00:26:51,559 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T00:26:51,559 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,36039,1733790409906-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,559 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,560 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.Replication(171): a67c4886b4f7,36039,1733790409906 started 2024-12-10T00:26:51,566 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T00:26:51,566 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1482): Serving as a67c4886b4f7,39473,1733790409727, RpcServer on a67c4886b4f7/172.17.0.2:39473, sessionid=0x1000d1e23400001 2024-12-10T00:26:51,567 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T00:26:51,567 DEBUG [RS:0;a67c4886b4f7:39473 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,568 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,39473,1733790409727' 2024-12-10T00:26:51,568 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T00:26:51,571 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,571 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1482): Serving as a67c4886b4f7,42867,1733790410027, RpcServer on a67c4886b4f7/172.17.0.2:42867, sessionid=0x1000d1e23400003 2024-12-10T00:26:51,571 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T00:26:51,572 DEBUG [RS:2;a67c4886b4f7:42867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,572 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,42867,1733790410027' 2024-12-10T00:26:51,572 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T00:26:51,572 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T00:26:51,573 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T00:26:51,573 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T00:26:51,573 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T00:26:51,573 DEBUG [RS:0;a67c4886b4f7:39473 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a67c4886b4f7,39473,1733790409727 2024-12-10T00:26:51,573 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,39473,1733790409727' 2024-12-10T00:26:51,573 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T00:26:51,573 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T00:26:51,574 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T00:26:51,574 DEBUG [RS:2;a67c4886b4f7:42867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,574 DEBUG [RS:2;a67c4886b4f7:42867 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,42867,1733790410027' 2024-12-10T00:26:51,574 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T00:26:51,574 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T00:26:51,574 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T00:26:51,575 DEBUG [RS:0;a67c4886b4f7:39473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T00:26:51,575 INFO [RS:0;a67c4886b4f7:39473 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T00:26:51,575 INFO [RS:0;a67c4886b4f7:39473 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T00:26:51,575 DEBUG [RS:2;a67c4886b4f7:42867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T00:26:51,575 INFO [RS:2;a67c4886b4f7:42867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T00:26:51,575 INFO [RS:2;a67c4886b4f7:42867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T00:26:51,582 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:51,582 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1482): Serving as a67c4886b4f7,36039,1733790409906, RpcServer on a67c4886b4f7/172.17.0.2:36039, sessionid=0x1000d1e23400002 2024-12-10T00:26:51,583 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T00:26:51,583 DEBUG [RS:1;a67c4886b4f7:36039 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,583 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,36039,1733790409906' 2024-12-10T00:26:51,583 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T00:26:51,584 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T00:26:51,585 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T00:26:51,585 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T00:26:51,585 DEBUG [RS:1;a67c4886b4f7:36039 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a67c4886b4f7,36039,1733790409906 2024-12-10T00:26:51,585 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a67c4886b4f7,36039,1733790409906' 2024-12-10T00:26:51,585 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T00:26:51,586 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T00:26:51,586 DEBUG [RS:1;a67c4886b4f7:36039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T00:26:51,586 INFO [RS:1;a67c4886b4f7:36039 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T00:26:51,586 INFO [RS:1;a67c4886b4f7:36039 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T00:26:51,681 INFO [RS:2;a67c4886b4f7:42867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:51,681 INFO [RS:0;a67c4886b4f7:39473 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:51,685 INFO [RS:0;a67c4886b4f7:39473 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a67c4886b4f7%2C39473%2C1733790409727, suffix=, logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,39473,1733790409727, archiveDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs, maxLogs=32 2024-12-10T00:26:51,685 INFO [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a67c4886b4f7%2C42867%2C1733790410027, suffix=, logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027, archiveDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs, maxLogs=32 2024-12-10T00:26:51,687 INFO [RS:1;a67c4886b4f7:36039 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:51,691 INFO [RS:1;a67c4886b4f7:36039 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a67c4886b4f7%2C36039%2C1733790409906, suffix=, logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906, archiveDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs, maxLogs=32 2024-12-10T00:26:51,710 DEBUG [RS:0;a67c4886b4f7:39473 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,39473,1733790409727/a67c4886b4f7%2C39473%2C1733790409727.1733790411689, exclude list is [], retry=0 2024-12-10T00:26:51,710 DEBUG [RS:1;a67c4886b4f7:36039 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, exclude list is [], retry=0 2024-12-10T00:26:51,710 DEBUG [RS:2;a67c4886b4f7:42867 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027/a67c4886b4f7%2C42867%2C1733790410027.1733790411691, exclude list is [], retry=0 2024-12-10T00:26:51,713 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:51,713 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:51,713 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:51,713 WARN [IPC Server handler 0 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:51,713 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:51,713 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:51,713 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:51,713 WARN [IPC Server handler 1 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:51,714 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:51,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:51,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:51,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:51,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:51,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:51,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:51,726 INFO [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027/a67c4886b4f7%2C42867%2C1733790410027.1733790411691 2024-12-10T00:26:51,729 DEBUG [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:51,733 INFO [RS:1;a67c4886b4f7:36039 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 2024-12-10T00:26:51,733 INFO [RS:0;a67c4886b4f7:39473 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,39473,1733790409727/a67c4886b4f7%2C39473%2C1733790409727.1733790411689 2024-12-10T00:26:51,734 DEBUG [RS:0;a67c4886b4f7:39473 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:51,734 DEBUG [RS:1;a67c4886b4f7:36039 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:51,808 DEBUG [a67c4886b4f7:41433 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T00:26:51,822 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(204): Hosts are {a67c4886b4f7=0} racks are {/default-rack=0} 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T00:26:51,828 INFO [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T00:26:51,828 INFO [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T00:26:51,828 INFO [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T00:26:51,828 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T00:26:51,835 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:51,839 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a67c4886b4f7,42867,1733790410027, state=OPENING 2024-12-10T00:26:51,853 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T00:26:51,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:26:51,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:51,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:51,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:51,876 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:51,878 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T00:26:51,880 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a67c4886b4f7,42867,1733790410027}] 2024-12-10T00:26:52,056 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T00:26:52,058 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59355, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T00:26:52,071 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T00:26:52,072 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:52,073 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T00:26:52,076 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a67c4886b4f7%2C42867%2C1733790410027.meta, suffix=.meta, logDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027, archiveDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs, maxLogs=32 2024-12-10T00:26:52,098 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027/a67c4886b4f7%2C42867%2C1733790410027.meta.1733790412079.meta, exclude list is [], retry=0 2024-12-10T00:26:52,100 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,100 WARN [IPC Server handler 1 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,100 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 
to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:52,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:52,106 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,42867,1733790410027/a67c4886b4f7%2C42867%2C1733790410027.meta.1733790412079.meta 2024-12-10T00:26:52,107 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:52,108 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:52,110 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T00:26:52,113 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T00:26:52,119 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
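The BlockPlacementPolicyDefault warnings above report that a replication-factor-3 block is "still in need of 1 to reach 3", and they name two loggers to raise to DEBUG for details. In a mini-cluster test everything runs in one JVM, so one way to follow that advice programmatically is through the Log4j 2 Configurator. This is a standalone sketch, not part of the test itself, and it only affects the JVM it runs in.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class EnableBlockPlacementDebug {
  public static void main(String[] args) {
    // Raise the two loggers named in the warning so the NameNode-side
    // placement decisions are logged in detail (in-process only).
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}
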
2024-12-10T00:26:52,124 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T00:26:52,125 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:52,125 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T00:26:52,125 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T00:26:52,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T00:26:52,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T00:26:52,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:52,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T00:26:52,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T00:26:52,135 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:52,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T00:26:52,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T00:26:52,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T00:26:52,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T00:26:52,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T00:26:52,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
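The StoreOpener lines above show the effective per-family settings for hbase:meta (ROW_INDEX_V1 data-block encoding, DefaultMemStore, no compression). For a user table the same knobs are set on the column-family descriptor. A minimal sketch using the public client builders follows; the table and family names are hypothetical, and the values simply mirror what is logged here.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

public final class EncodedFamilySketch {
  public static void main(String[] args) {
    // Hypothetical family; the encoding matches what the StoreOpener reported above.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)   // small blocks and in-memory caching suit frequently read metadata
        .setInMemory(true)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))   // hypothetical table name
        .setColumnFamily(cf)
        .build();
    System.out.println(td);
  }
}
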
2024-12-10T00:26:52,142 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T00:26:52,143 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740 2024-12-10T00:26:52,146 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740 2024-12-10T00:26:52,149 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T00:26:52,149 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T00:26:52,150 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T00:26:52,153 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T00:26:52,155 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72689571, jitterRate=0.08315901458263397}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T00:26:52,156 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T00:26:52,157 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733790412126Writing region info on filesystem at 1733790412126Initializing all the Stores at 1733790412128 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790412128Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790412128Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790412129 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733790412129Cleaning up temporary data from old regions at 1733790412149 (+20 ms)Running coprocessor post-open hooks at 1733790412156 (+7 ms)Region opened successfully at 1733790412157 (+1 ms) 2024-12-10T00:26:52,164 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733790412048 2024-12-10T00:26:52,177 DEBUG [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T00:26:52,178 INFO [RS_OPEN_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T00:26:52,180 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:52,183 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a67c4886b4f7,42867,1733790410027, state=OPEN 2024-12-10T00:26:52,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T00:26:52,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T00:26:52,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T00:26:52,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T00:26:52,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:52,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:52,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:52,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T00:26:52,192 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a67c4886b4f7,42867,1733790410027 2024-12-10T00:26:52,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T00:26:52,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a67c4886b4f7,42867,1733790410027 in 312 msec 2024-12-10T00:26:52,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T00:26:52,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 808 msec 2024-12-10T00:26:52,209 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T00:26:52,209 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T00:26:52,232 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T00:26:52,233 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a67c4886b4f7,42867,1733790410027, seqNum=-1] 2024-12-10T00:26:52,260 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T00:26:52,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35145, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T00:26:52,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1310 sec 2024-12-10T00:26:52,286 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733790412286, completionTime=-1 2024-12-10T00:26:52,290 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T00:26:52,290 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
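Before creating the default and hbase namespaces, the procedure above fetches the hbase:meta location from the connection registry ("The fetched meta region location is ..."). A client can perform the same lookup itself; the following is a minimal sketch, assuming an hbase-site.xml pointing at this cluster is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaLocationProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Same lookup as the "fetched meta region location" line above;
      // reload=true bypasses the client-side location cache.
      HRegionLocation meta = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("hbase:meta is served by " + meta.getServerName());
    }
  }
}
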
2024-12-10T00:26:52,351 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T00:26:52,351 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733790472351 2024-12-10T00:26:52,351 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733790532351 2024-12-10T00:26:52,351 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 60 msec 2024-12-10T00:26:52,352 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T00:26:52,363 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,364 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,364 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,366 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a67c4886b4f7:41433, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,367 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,368 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,374 DEBUG [master/a67c4886b4f7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T00:26:52,400 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.187sec 2024-12-10T00:26:52,401 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T00:26:52,403 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T00:26:52,403 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T00:26:52,404 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
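The master's periodic work above is scheduled as ScheduledChore instances on a ChoreService (ClusterStatusChore every 60000 ms, BalancerChore every 300000 ms, and so on). Below is a rough sketch of the same pattern, assuming the ScheduledChore(String, Stoppable, int) constructor and ChoreService API of the HBase version in this log; the chore name and period are made up.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Period of 60000, assumed to be milliseconds, mirroring the ClusterStatusChore above.
    ScheduledChore heartbeat = new ScheduledChore("demo-chore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(heartbeat);
    Thread.sleep(5_000);
    service.shutdown();
  }
}
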
2024-12-10T00:26:52,404 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T00:26:52,405 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T00:26:52,405 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T00:26:52,410 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T00:26:52,411 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T00:26:52,412 INFO [master/a67c4886b4f7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a67c4886b4f7,41433,1733790408551-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T00:26:52,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ed908a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T00:26:52,498 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a67c4886b4f7,41433,-1 for getting cluster id 2024-12-10T00:26:52,501 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T00:26:52,510 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd36e6add-ccf9-4630-ab24-60fe546afd9a' 2024-12-10T00:26:52,512 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T00:26:52,513 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d36e6add-ccf9-4630-ab24-60fe546afd9a" 2024-12-10T00:26:52,513 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@732b1571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T00:26:52,513 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a67c4886b4f7,41433,-1] 2024-12-10T00:26:52,516 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T00:26:52,518 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:26:52,520 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51870, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T00:26:52,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769aece, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T00:26:52,523 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T00:26:52,529 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a67c4886b4f7,42867,1733790410027, seqNum=-1] 2024-12-10T00:26:52,530 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T00:26:52,532 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56924, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T00:26:52,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a67c4886b4f7,41433,1733790408551 2024-12-10T00:26:52,550 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:34093/hbase 2024-12-10T00:26:52,563 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=355, OpenFileDescriptor=587, MaxFileDescriptor=1048576, SystemLoadAverage=293, ProcessCount=11, AvailableMemoryMB=8203 2024-12-10T00:26:52,581 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:52,585 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:52,587 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:52,591 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-25773563, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-25773563, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:52,604 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-25773563/hregion-25773563.1733790412592, exclude list is [], retry=0 2024-12-10T00:26:52,606 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,607 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,607 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,609 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:52,609 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:52,613 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-25773563/hregion-25773563.1733790412592 2024-12-10T00:26:52,616 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:52,616 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => eaf6c0696cdc480c3d349dd818b5960c, NAME => 'testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:26:52,622 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,622 WARN [IPC Server handler 0 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,622 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741838_1014 (size=64) 2024-12-10T00:26:52,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741838_1014 
(size=64) 2024-12-10T00:26:52,633 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:52,635 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,638 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eaf6c0696cdc480c3d349dd818b5960c columnFamilyName a 2024-12-10T00:26:52,638 DEBUG [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,639 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] regionserver.HStore(327): Store=eaf6c0696cdc480c3d349dd818b5960c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:52,639 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,640 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,641 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,642 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,642 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,644 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,648 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:52,649 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eaf6c0696cdc480c3d349dd818b5960c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75074152, 
jitterRate=0.11869204044342041}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:26:52,650 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eaf6c0696cdc480c3d349dd818b5960c: Writing region info on filesystem at 1733790412633Initializing all the Stores at 1733790412634 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790412634Cleaning up temporary data from old regions at 1733790412642 (+8 ms)Region opened successfully at 1733790412650 (+8 ms) 2024-12-10T00:26:52,650 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing eaf6c0696cdc480c3d349dd818b5960c, disabling compactions & flushes 2024-12-10T00:26:52,650 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. 2024-12-10T00:26:52,650 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. 2024-12-10T00:26:52,650 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. after waiting 0 ms 2024-12-10T00:26:52,650 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. 2024-12-10T00:26:52,653 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. 
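The region created and closed above was built from a RegionInfo with empty start and end keys ("STARTKEY => '', ENDKEY => ''"), and its encoded name (eaf6c0696cdc480c3d349dd818b5960c) is what appears in the data and recovered.edits paths throughout this test. A small illustration of constructing such a RegionInfo with the public builder; this is not the test's own code, and the printed encoded name will be a fresh hash, not the one in this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionInfoSketch {
  public static void main(String[] args) {
    // Empty start/end keys mean the region spans the whole table.
    RegionInfo info = RegionInfoBuilder
        .newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"))
        .setStartKey(Bytes.toBytes(""))
        .setEndKey(Bytes.toBytes(""))
        .build();
    // The full name embeds table, start key, and a timestamp; the encoded name is the
    // hash used in on-disk paths such as data/default/<table>/<encoded name>.
    System.out.println(info.getRegionNameAsString());
    System.out.println(info.getEncodedName());
  }
}
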
2024-12-10T00:26:52,653 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for eaf6c0696cdc480c3d349dd818b5960c: Waiting for close lock at 1733790412650Disabling compacts and flushes for region at 1733790412650Disabling writes for close at 1733790412650Writing region close event to WAL at 1733790412653 (+3 ms)Closed at 1733790412653 2024-12-10T00:26:52,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741837_1013 (size=95) 2024-12-10T00:26:52,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741837_1013 (size=95) 2024-12-10T00:26:52,666 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:52,666 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-25773563:(num 1733790412592) 2024-12-10T00:26:52,668 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T00:26:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741839_1015 (size=320) 2024-12-10T00:26:52,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741839_1015 (size=320) 2024-12-10T00:26:52,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741839_1015 (size=320) 2024-12-10T00:26:52,688 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T00:26:52,691 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,691 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,691 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741840_1016 (size=253) 2024-12-10T00:26:52,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741840_1016 (size=253) 2024-12-10T00:26:52,725 INFO [Time-limited test {}] 
wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1, size=320 (320bytes) 2024-12-10T00:26:52,726 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-10T00:26:52,726 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-10T00:26:52,727 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1 2024-12-10T00:26:52,732 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1 after 3ms 2024-12-10T00:26:52,737 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:52,738 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1 took 14ms 2024-12-10T00:26:52,749 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1 so closing down 2024-12-10T00:26:52,749 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:52,752 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-10T00:26:52,756 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp 2024-12-10T00:26:52,757 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:52,759 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,759 WARN [IPC Server handler 4 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,759 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], 
storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741841_1017 (size=320) 2024-12-10T00:26:52,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741841_1017 (size=320) 2024-12-10T00:26:52,772 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T00:26:52,775 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002 2024-12-10T00:26:52,780 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 36 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-10T00:26:52,781 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1, journal: Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1, size=320 (320bytes) at 1733790412726Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1 so closing down at 1733790412749 (+23 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp at 1733790412756 (+7 ms)3 split writer threads finished at 1733790412757 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733790412772 (+15 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002 at 1733790412775 (+3 ms)Processed 2 edits across 1 Regions in 36 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733790412780 (+5 ms) 2024-12-10T00:26:52,803 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2, size=253 (253bytes) 2024-12-10T00:26:52,803 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2 2024-12-10T00:26:52,804 INFO [Time-limited 
test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2 after 1ms 2024-12-10T00:26:52,809 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:52,809 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2 took 7ms 2024-12-10T00:26:52,814 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2 so closing down 2024-12-10T00:26:52,814 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:52,817 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-10T00:26:52,819 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002-wal-2.temp 2024-12-10T00:26:52,820 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:52,824 WARN [IPC Server handler 2 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,824 WARN [IPC Server handler 2 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,824 WARN [IPC Server handler 2 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741842_1018 (size=253) 2024-12-10T00:26:52,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741842_1018 (size=253) 2024-12-10T00:26:52,836 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T00:26:52,840 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:52,842 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-12-10T00:26:52,844 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 33 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-10T00:26:52,844 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2, journal: Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2, size=253 (253bytes) at 1733790412803Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2 so closing down at 1733790412814 (+11 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002-wal-2.temp at 1733790412819 (+5 ms)3 split writer threads finished at 1733790412820 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733790412836 (+16 ms)Processed 1 edits across 1 Regions in 33 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733790412844 (+8 ms) 2024-12-10T00:26:52,844 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:52,846 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:52,859 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal.1733790412847, exclude list is [], retry=0 2024-12-10T00:26:52,861 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
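The WARN just above ("Found existing old edits file and we have less entries. Deleting ...0000000000000000002-wal-2.temp") and its counterpart later in this log (where the already-renamed file is the smaller one and is deleted so the new temp can be renamed over it) are two sides of the same rule: when two split outputs target the same recovered-edits name, keep whichever file holds more edits and drop the other. A rough sketch of that decision follows; the method and parameter names are made up, counting the existing file's edits is left to the caller, and nothing here is taken from AbstractRecoveredEditsOutputSink beyond what the log itself shows.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class RecoveredEditsConflict {
    // Keep the file with more edits when the rename target already exists.
    static void resolve(Path tempFile, long tempEditCount,
                        Path existingFinal, long existingEditCount) throws IOException {
        if (Files.exists(existingFinal) && existingEditCount >= tempEditCount) {
            // "Found existing old edits file and we have less entries. Deleting <temp>"
            Files.delete(tempFile);
        } else {
            // "Found existing old edits file. It could be the result of a previous
            //  failed split attempt or we have duplicated wal entries. Deleting <existing>"
            Files.deleteIfExists(existingFinal);
            Files.move(tempFile, existingFinal);
        }
    }
}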
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:52,861 WARN [IPC Server handler 4 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:52,861 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:52,863 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:52,864 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:52,868 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal.1733790412847 2024-12-10T00:26:52,869 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:52,869 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => eaf6c0696cdc480c3d349dd818b5960c, NAME => 'testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:52,869 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:52,869 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,869 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,872 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,874 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
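The recurring "Failed to place enough replicas, still in need of 1 to reach 3" warnings are the default HDFS replication factor of 3 colliding with this mini-cluster: the block placement policy cannot find a third acceptable DISK storage for these writes, so the blocks end up on the two datanodes that are accepted (see the paired addStoredBlock lines) and the test carries on regardless. If one wanted to silence the warnings in a small test cluster, lowering the replication factor to match the datanode count is one option; the snippet below is only an assumption about how such a test could be configured (it needs hadoop-common on the classpath) and is not something this test does.

import org.apache.hadoop.conf.Configuration;

public class LowReplicationTestConf {
    static Configuration create() {
        Configuration conf = new Configuration();
        // Ask HDFS for no more replicas than the mini-cluster can reliably place.
        conf.setInt("dfs.replication", 2);
        return conf;
    }
}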
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eaf6c0696cdc480c3d349dd818b5960c columnFamilyName a 2024-12-10T00:26:52,875 DEBUG [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:52,876 INFO [StoreOpener-eaf6c0696cdc480c3d349dd818b5960c-1 {}] regionserver.HStore(327): Store=eaf6c0696cdc480c3d349dd818b5960c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:52,876 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,877 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,880 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:52,881 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002 2024-12-10T00:26:52,885 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:52,891 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002 2024-12-10T00:26:52,894 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing eaf6c0696cdc480c3d349dd818b5960c 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-10T00:26:52,946 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/.tmp/a/6b4e6171b76b4f71afe27bbde168958d is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733790412666/Put/seqid=0 2024-12-10T00:26:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741844_1020 (size=5170) 2024-12-10T00:26:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741844_1020 (size=5170) 2024-12-10T00:26:52,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741844_1020 (size=5170) 2024-12-10T00:26:52,963 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/.tmp/a/6b4e6171b76b4f71afe27bbde168958d 2024-12-10T00:26:53,014 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/.tmp/a/6b4e6171b76b4f71afe27bbde168958d as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/a/6b4e6171b76b4f71afe27bbde168958d 2024-12-10T00:26:53,023 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/a/6b4e6171b76b4f71afe27bbde168958d, entries=2, sequenceid=2, filesize=5.0 K 2024-12-10T00:26:53,028 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for eaf6c0696cdc480c3d349dd818b5960c in 133ms, sequenceid=2, compaction requested=false; wal=null 2024-12-10T00:26:53,031 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/0000000000000000002 2024-12-10T00:26:53,032 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:53,032 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:53,036 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eaf6c0696cdc480c3d349dd818b5960c 2024-12-10T00:26:53,040 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/eaf6c0696cdc480c3d349dd818b5960c/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-10T00:26:53,042 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eaf6c0696cdc480c3d349dd818b5960c; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66151767, jitterRate=-0.01426185667514801}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:26:53,043 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eaf6c0696cdc480c3d349dd818b5960c: Writing region info on filesystem at 1733790412870Initializing all the Stores at 1733790412872 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790412872Obtaining lock to block concurrent updates at 1733790412894 (+22 ms)Preparing flush snapshotting stores in eaf6c0696cdc480c3d349dd818b5960c at 1733790412894Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733790412897 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733790412582.eaf6c0696cdc480c3d349dd818b5960c. 
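The region open above then replays recovered.edits/0000000000000000002 ("Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2"), flushes the resulting ~108 B of memstore into an HFile, deletes the recovered-edits file, and records the new maximum in recovered.edits/2.seqid. The sketch below captures that replay-then-flush shape with placeholder Edit and Store types; it is an illustration of the sequence visible in the log, not HBase's HRegion code.

import java.util.List;

public class RecoveredEditsReplay {
    record Edit(long seqId, byte[] row, byte[] value) {}

    interface Store {
        void apply(Edit e);   // add the cell to the memstore
        void flush();         // persist the memstore to a store file
    }

    // Returns the highest sequence id seen, to be recorded in recovered.edits/<maxSeqId>.seqid.
    static long replay(List<Edit> editsFromFile, long maxSeqIdAlreadyFlushed, Store store) {
        long applied = 0, skipped = 0, maxSeen = maxSeqIdAlreadyFlushed;
        for (Edit e : editsFromFile) {
            maxSeen = Math.max(maxSeen, e.seqId());
            if (e.seqId() <= maxSeqIdAlreadyFlushed) {
                skipped++;                   // already persisted, nothing to redo
            } else {
                store.apply(e);
                applied++;
            }
        }
        if (applied > 0) {
            store.flush();                   // flush before the recovered-edits file is deleted
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n", applied, skipped, maxSeen);
        return maxSeen;
    }
}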
at 1733790412897Flushing eaf6c0696cdc480c3d349dd818b5960c/a: creating writer at 1733790412898 (+1 ms)Flushing eaf6c0696cdc480c3d349dd818b5960c/a: appending metadata at 1733790412933 (+35 ms)Flushing eaf6c0696cdc480c3d349dd818b5960c/a: closing flushed file at 1733790412935 (+2 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b2d903: reopening flushed file at 1733790413011 (+76 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for eaf6c0696cdc480c3d349dd818b5960c in 133ms, sequenceid=2, compaction requested=false; wal=null at 1733790413029 (+18 ms)Cleaning up temporary data from old regions at 1733790413032 (+3 ms)Region opened successfully at 1733790413043 (+11 ms) 2024-12-10T00:26:53,068 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=363 (was 355) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:59634 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34093/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:59822 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for 
client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:39902 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:39828 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=673 (was 587) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=293 (was 293), ProcessCount=11 (was 11), AvailableMemoryMB=8175 (was 8203) 2024-12-10T00:26:53,078 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=363, OpenFileDescriptor=673, MaxFileDescriptor=1048576, SystemLoadAverage=293, ProcessCount=11, AvailableMemoryMB=8174 2024-12-10T00:26:53,093 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:53,096 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:53,097 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:53,101 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-68508456, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-68508456, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:53,114 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-68508456/hregion-68508456.1733790413102, exclude list is [], retry=0 2024-12-10T00:26:53,117 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,117 WARN [IPC Server handler 0 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,117 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,119 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:53,119 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:53,121 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-68508456/hregion-68508456.1733790413102 2024-12-10T00:26:53,122 DEBUG 
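The ResourceChecker block above is per-test bookkeeping: it snapshots thread count, open file descriptors, load average, process count and available memory before and after the test, prints the deltas (Thread=363 was 355, OpenFileDescriptor=673 was 587), flags growth with "LEAK?", and dumps the stacks of threads it considers potentially hanging (here DataXceiver, PacketResponder, AsyncFSWAL and netty event-loop threads that are merely parked in epoll or Object.wait). The sketch below shows the general before/after shape of such a checker; the class name, comparison and output format are illustrative, not HBase's ResourceChecker.

import java.lang.management.ManagementFactory;
import com.sun.management.UnixOperatingSystemMXBean;

public class ResourceSnapshot {
    final int threads = Thread.getAllStackTraces().size();
    final long openFds = openFdCount();

    static long openFdCount() {
        var os = ManagementFactory.getOperatingSystemMXBean();
        return os instanceof UnixOperatingSystemMXBean unix ? unix.getOpenFileDescriptorCount() : -1;
    }

    static void compare(ResourceSnapshot before, ResourceSnapshot after) {
        System.out.printf("Thread=%d (was %d)%s%n", after.threads, before.threads,
            after.threads > before.threads ? " - Thread LEAK?" : "");
        System.out.printf("OpenFileDescriptor=%d (was %d)%s%n", after.openFds, before.openFds,
            after.openFds > before.openFds ? " - OpenFileDescriptor LEAK?" : "");
    }
}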
[Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:53,122 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 3609a3a8683f7128a445164b1defa423, NAME => 'testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:26:53,130 WARN [IPC Server handler 2 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,130 WARN [IPC Server handler 2 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,130 WARN [IPC Server handler 2 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741846_1022 (size=64) 2024-12-10T00:26:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741846_1022 (size=64) 2024-12-10T00:26:53,138 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:53,140 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,143 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3609a3a8683f7128a445164b1defa423 columnFamilyName a 2024-12-10T00:26:53,143 DEBUG [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:53,144 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] regionserver.HStore(327): Store=3609a3a8683f7128a445164b1defa423/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:53,144 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,145 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,146 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,147 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,147 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,149 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,152 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:53,153 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3609a3a8683f7128a445164b1defa423; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66452826, jitterRate=-0.009775727987289429}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:26:53,153 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3609a3a8683f7128a445164b1defa423: Writing region info on filesystem at 1733790413138Initializing all the Stores at 1733790413139 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790413139Cleaning up temporary data from old regions at 1733790413147 (+8 ms)Region opened successfully at 1733790413153 (+6 ms) 2024-12-10T00:26:53,153 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3609a3a8683f7128a445164b1defa423, disabling compactions & flushes 2024-12-10T00:26:53,154 INFO 
[Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. 2024-12-10T00:26:53,154 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. 2024-12-10T00:26:53,154 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. after waiting 0 ms 2024-12-10T00:26:53,154 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. 2024-12-10T00:26:53,154 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. 2024-12-10T00:26:53,154 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3609a3a8683f7128a445164b1defa423: Waiting for close lock at 1733790413153Disabling compacts and flushes for region at 1733790413153Disabling writes for close at 1733790413154 (+1 ms)Writing region close event to WAL at 1733790413154Closed at 1733790413154 2024-12-10T00:26:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741845_1021 (size=95) 2024-12-10T00:26:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741845_1021 (size=95) 2024-12-10T00:26:53,162 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:53,162 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-68508456:(num 1733790413102) 2024-12-10T00:26:53,163 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T00:26:53,165 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,165 WARN [IPC Server handler 4 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,165 WARN [IPC Server handler 4 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to 
blk_1073741847_1023 (size=320) 2024-12-10T00:26:53,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741847_1023 (size=320) 2024-12-10T00:26:53,174 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T00:26:53,177 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,177 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,177 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741848_1024 (size=253) 2024-12-10T00:26:53,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741848_1024 (size=253) 2024-12-10T00:26:53,202 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2, size=253 (253bytes) 2024-12-10T00:26:53,202 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2 2024-12-10T00:26:53,204 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2 after 1ms 2024-12-10T00:26:53,208 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:53,209 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2 took 7ms 2024-12-10T00:26:53,211 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2 so closing down 2024-12-10T00:26:53,212 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to 
finish 2024-12-10T00:26:53,214 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-10T00:26:53,216 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp 2024-12-10T00:26:53,217 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:53,218 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,218 WARN [IPC Server handler 1 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,219 WARN [IPC Server handler 1 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741849_1025 (size=253) 2024-12-10T00:26:53,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741849_1025 (size=253) 2024-12-10T00:26:53,224 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T00:26:53,226 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 2024-12-10T00:26:53,226 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-10T00:26:53,226 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of 
hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2, journal: Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2, size=253 (253bytes) at 1733790413202Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2 so closing down at 1733790413212 (+10 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp at 1733790413216 (+4 ms)3 split writer threads finished at 1733790413217 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733790413224 (+7 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 at 1733790413226 (+2 ms)Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733790413226 2024-12-10T00:26:53,239 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1, size=320 (320bytes) 2024-12-10T00:26:53,240 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1 2024-12-10T00:26:53,240 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1 after 0ms 2024-12-10T00:26:53,244 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:53,244 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1 took 5ms 2024-12-10T00:26:53,247 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1 so closing down 2024-12-10T00:26:53,247 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:53,249 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-10T00:26:53,250 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp 2024-12-10T00:26:53,250 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:53,252 WARN [IPC Server handler 0 on 
default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,252 WARN [IPC Server handler 0 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,252 WARN [IPC Server handler 0 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741850_1026 (size=320) 2024-12-10T00:26:53,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741850_1026 (size=320) 2024-12-10T00:26:53,259 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T00:26:53,264 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:53,266 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. 
Deleting hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002, length=253 2024-12-10T00:26:53,269 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 2024-12-10T00:26:53,269 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 25 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-10T00:26:53,270 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1, journal: Splitting hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1, size=320 (320bytes) at 1733790413239Finishing writing output for hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1 so closing down at 1733790413247 (+8 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp at 1733790413250 (+3 ms)3 split writer threads finished at 1733790413250Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733790413259 (+9 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 at 1733790413269 (+10 ms)Processed 2 edits across 1 Regions in 25 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733790413269 2024-12-10T00:26:53,270 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:53,272 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:53,285 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal.1733790413273, exclude list is [], retry=0 2024-12-10T00:26:53,289 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:53,290 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:53,291 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:53,293 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal.1733790413273 2024-12-10T00:26:53,293 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:53,293 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 3609a3a8683f7128a445164b1defa423, NAME => 'testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:53,293 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:53,293 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,293 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,295 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,298 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3609a3a8683f7128a445164b1defa423 columnFamilyName a 2024-12-10T00:26:53,298 DEBUG [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:53,299 INFO [StoreOpener-3609a3a8683f7128a445164b1defa423-1 {}] regionserver.HStore(327): Store=3609a3a8683f7128a445164b1defa423/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:53,299 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,300 DEBUG [Time-limited test 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,302 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,303 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 2024-12-10T00:26:53,306 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:53,308 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 2024-12-10T00:26:53,308 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3609a3a8683f7128a445164b1defa423 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-10T00:26:53,325 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/.tmp/a/946d8e3f4ebf4107af45bd82f6f04e5d is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733790413162/Put/seqid=0 2024-12-10T00:26:53,326 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T00:26:53,327 WARN [IPC Server handler 3 on default port 34093 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T00:26:53,327 WARN [IPC Server handler 3 on default port 34093 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T00:26:53,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741852_1028 (size=5170) 2024-12-10T00:26:53,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to 
blk_1073741852_1028 (size=5170) 2024-12-10T00:26:53,333 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/.tmp/a/946d8e3f4ebf4107af45bd82f6f04e5d 2024-12-10T00:26:53,341 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/.tmp/a/946d8e3f4ebf4107af45bd82f6f04e5d as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/a/946d8e3f4ebf4107af45bd82f6f04e5d 2024-12-10T00:26:53,350 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/a/946d8e3f4ebf4107af45bd82f6f04e5d, entries=2, sequenceid=2, filesize=5.0 K 2024-12-10T00:26:53,350 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 3609a3a8683f7128a445164b1defa423 in 42ms, sequenceid=2, compaction requested=false; wal=null 2024-12-10T00:26:53,351 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/0000000000000000002 2024-12-10T00:26:53,352 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,352 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,355 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3609a3a8683f7128a445164b1defa423 2024-12-10T00:26:53,359 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/3609a3a8683f7128a445164b1defa423/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-10T00:26:53,360 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3609a3a8683f7128a445164b1defa423; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66781174, jitterRate=-0.0048829615116119385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:26:53,360 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3609a3a8683f7128a445164b1defa423: Writing region info on filesystem at 1733790413293Initializing all the Stores at 1733790413295 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790413295Obtaining lock to block concurrent updates at 1733790413308 (+13 ms)Preparing flush snapshotting stores in 3609a3a8683f7128a445164b1defa423 at 1733790413308Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733790413309 (+1 
ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733790413094.3609a3a8683f7128a445164b1defa423. at 1733790413309Flushing 3609a3a8683f7128a445164b1defa423/a: creating writer at 1733790413309Flushing 3609a3a8683f7128a445164b1defa423/a: appending metadata at 1733790413324 (+15 ms)Flushing 3609a3a8683f7128a445164b1defa423/a: closing flushed file at 1733790413324Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@358851f7: reopening flushed file at 1733790413340 (+16 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 3609a3a8683f7128a445164b1defa423 in 42ms, sequenceid=2, compaction requested=false; wal=null at 1733790413350 (+10 ms)Cleaning up temporary data from old regions at 1733790413352 (+2 ms)Region opened successfully at 1733790413360 (+8 ms) 2024-12-10T00:26:53,376 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=371 (was 363) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:59634 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:39976 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:59900 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47728 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:39828 [Waiting for operation #24] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=747 (was 673) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=293 (was 293), ProcessCount=11 (was 11), AvailableMemoryMB=8163 (was 8174) 2024-12-10T00:26:53,387 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=371, OpenFileDescriptor=747, MaxFileDescriptor=1048576, SystemLoadAverage=293, ProcessCount=11, AvailableMemoryMB=8162 2024-12-10T00:26:53,405 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:53,408 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:53,409 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:53,413 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-65479990, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-65479990, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:53,430 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-65479990/hregion-65479990.1733790413413, exclude list is [], retry=0 2024-12-10T00:26:53,434 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:53,434 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:53,434 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:53,437 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-65479990/hregion-65479990.1733790413413 2024-12-10T00:26:53,438 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:26:53,439 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 8a99b4b7cca80643fea86456a286fda3, NAME => 'testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:26:53,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741854_1030 (size=64) 2024-12-10T00:26:53,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741854_1030 (size=64) 2024-12-10T00:26:53,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741854_1030 (size=64) 2024-12-10T00:26:53,456 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:53,458 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,460 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName a 2024-12-10T00:26:53,460 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:53,461 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:53,461 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,463 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName b 2024-12-10T00:26:53,463 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:53,464 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:53,465 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,467 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName c 2024-12-10T00:26:53,467 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:53,468 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:53,468 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,470 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,471 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,472 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,472 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8a99b4b7cca80643fea86456a286fda3 
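For reference, the table descriptor printed above (families 'a', 'b', 'c', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB) can be reproduced with the HBase 2.x builder API roughly as follows; this is an illustrative sketch of an equivalent descriptor, not the test's actual setup code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ReplayTableDescriptorSketch {
      // Builds a descriptor matching what the log prints for testReplayEditsWrittenIntoWAL:
      // three column families, one version each, ROW bloom filters, 64 KB blocks.
      static TableDescriptor build() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"));
        for (String family : new String[] { "a", "b", "c" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .build());
        }
        return builder.build();
      }
    }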
2024-12-10T00:26:53,473 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:26:53,476 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:53,480 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:53,481 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8a99b4b7cca80643fea86456a286fda3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70325962, jitterRate=0.04793849587440491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:26:53,481 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8a99b4b7cca80643fea86456a286fda3: Writing region info on filesystem at 1733790413456Initializing all the Stores at 1733790413457 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790413457Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790413457Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790413458 (+1 ms)Cleaning up temporary data from old regions at 1733790413472 (+14 ms)Region opened successfully at 1733790413481 (+9 ms) 2024-12-10T00:26:53,482 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8a99b4b7cca80643fea86456a286fda3, disabling compactions & flushes 2024-12-10T00:26:53,482 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:53,482 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:53,482 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. after waiting 0 ms 2024-12-10T00:26:53,482 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 
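The flushSizeLowerBound=44739242 reported by FlushLargeStoresPolicy above is simply the region memstore flush size divided by the number of column families, as the preceding DEBUG line says; assuming the default hbase.hregion.memstore.flush.size of 128 MB, the fallback works out to:

    134217728 bytes (128 MB) / 3 column families = 44739242 bytes ≈ 42.7 MB

which matches both the "(42.7 M)" hint and the flushSizeLowerBound value in the open journal.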
2024-12-10T00:26:53,484 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:53,484 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8a99b4b7cca80643fea86456a286fda3: Waiting for close lock at 1733790413482Disabling compacts and flushes for region at 1733790413482Disabling writes for close at 1733790413482Writing region close event to WAL at 1733790413484 (+2 ms)Closed at 1733790413484 2024-12-10T00:26:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741853_1029 (size=95) 2024-12-10T00:26:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741853_1029 (size=95) 2024-12-10T00:26:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741853_1029 (size=95) 2024-12-10T00:26:53,492 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:53,493 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-65479990:(num 1733790413413) 2024-12-10T00:26:53,493 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:53,496 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:53,510 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, exclude list is [], retry=0 2024-12-10T00:26:53,514 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:53,515 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:53,515 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:53,519 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 2024-12-10T00:26:53,521 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:53,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741828_1004 (size=1189) 2024-12-10T00:26:53,822 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting 
hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, size=0 (0bytes) 2024-12-10T00:26:53,822 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 might be still open, length is 0 2024-12-10T00:26:53,823 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 2024-12-10T00:26:53,825 WARN [IPC Server handler 4 on default port 34093 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-10T00:26:53,827 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 after 3ms 2024-12-10T00:26:56,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741832_1008 (size=32) 2024-12-10T00:26:56,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47748 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:34693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47748 dst: /127.0.0.1:34693 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34693 remote=/127.0.0.1:47748]. Total timeout mills is 60000, 57128 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:26:56,645 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:40024 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:35811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40024 dst: /127.0.0.1:35811 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:26:56,645 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:59934 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:37237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59934 dst: /127.0.0.1:37237 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:26:56,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741855_1032 (size=263633) 2024-12-10T00:26:56,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741855_1032 (size=263633) 2024-12-10T00:26:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741855_1032 (size=263633) 2024-12-10T00:26:57,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T00:26:57,687 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T00:26:57,828 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 after 4005ms 2024-12-10T00:26:57,836 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:57,838 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 took 4016ms 2024-12-10T00:26:57,845 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733790413496.temp 2024-12-10T00:26:57,846 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp 2024-12-10T00:26:57,964 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496; continuing. 
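The lease recovery traced above (attempt=0 fails after 3ms, attempt=1 succeeds after 4005ms once the datanode write pipelines are torn down) follows the usual HDFS pattern: ask the NameNode to recover the lease, then poll until the file is reported closed. A minimal sketch using only the public DistributedFileSystem API, simplified relative to HBase's RecoverLeaseFSUtils (which adds timeouts, progress reporting, and tuned backoff):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseRecoverySketch {
      // Trigger lease recovery on a possibly-open WAL file and wait until HDFS closes it.
      static void recoverLease(DistributedFileSystem dfs, Path walFile) throws Exception {
        boolean recovered = dfs.recoverLease(walFile);      // attempt=0 may return false
        int attempt = 0;
        while (!recovered && !dfs.isFileClosed(walFile)) {
          Thread.sleep(1000L * Math.min(4, ++attempt));     // back off; ~4 s matches the log
          recovered = dfs.recoverLease(walFile);            // retry until the lease is released
        }
      }
    }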
2024-12-10T00:26:57,964 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 so closing down 2024-12-10T00:26:57,964 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:57,964 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:57,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741856_1033 (size=263641) 2024-12-10T00:26:57,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741856_1033 (size=263641) 2024-12-10T00:26:57,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741856_1033 (size=263641) 2024-12-10T00:26:57,968 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp (wrote 3002 edits, skipped 0 edits in 67 ms) 2024-12-10T00:26:57,971 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 2024-12-10T00:26:57,971 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 132 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T00:26:57,971 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, journal: Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, size=0 (0bytes) at 1733790413822Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp at 1733790417846 (+4024 ms)Split 1024 edits, skipped 0 edits. at 1733790417912 (+66 ms)Split 2048 edits, skipped 0 edits. 
at 1733790417939 (+27 ms)Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 so closing down at 1733790417964 (+25 ms)3 split writer threads finished at 1733790417964Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp (wrote 3002 edits, skipped 0 edits in 67 ms) at 1733790417969 (+5 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000000001-wal.1733790413496.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 at 1733790417971 (+2 ms)Processed 3002 edits across 1 Regions in 132 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496, size=0, length=0, corrupted=false, cancelled=false at 1733790417971 2024-12-10T00:26:57,974 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790413496 2024-12-10T00:26:57,975 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 2024-12-10T00:26:57,975 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:57,977 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:57,994 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790417978, exclude list is [], retry=0 2024-12-10T00:26:57,998 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:57,999 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:58,000 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:58,002 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790417978 2024-12-10T00:26:58,003 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 
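File-system view of the split journal above: the split writer produced recovered.edits/0000000000000000001-wal.1733790413496.temp, renamed it to 0000000000000003002 (the highest sequence id it wrote), and the fully split source WAL was then archived under /hbase/oldWALs. The sketch below reproduces only that rename/archival effect with the plain Hadoop FileSystem API; it is not the WALSplitter/OutputSink implementation itself, and the paths are copied from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsLayoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path region = new Path("hdfs://localhost:34093/hbase/data/default/"
        + "testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3");
    FileSystem fs = region.getFileSystem(conf);

    // 1. The split writer finalizes its temp file under recovered.edits/ by renaming it
    //    to the last sequence id it wrote (3002), so region open can replay files in order.
    Path editsDir = new Path(region, "recovered.edits");
    Path temp = new Path(editsDir, "0000000000000000001-wal.1733790413496.temp");
    Path finalEdits = new Path(editsDir, "0000000000000003002");
    if (!fs.rename(temp, finalEdits)) {
      throw new IllegalStateException("rename failed: " + temp + " -> " + finalEdits);
    }

    // 2. The fully split source WAL is moved to the archive directory /hbase/oldWALs.
    Path wal = new Path("hdfs://localhost:34093/hbase/WALs/"
        + "testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496");
    Path archived = new Path("hdfs://localhost:34093/hbase/oldWALs/wal.1733790413496");
    if (!fs.rename(wal, archived)) {
      throw new IllegalStateException("archive failed: " + wal);
    }
  }
}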
2024-12-10T00:26:58,003 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:58,006 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,008 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName a 2024-12-10T00:26:58,008 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,009 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,009 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,010 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName b 2024-12-10T00:26:58,010 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,011 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/b, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,011 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,013 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a99b4b7cca80643fea86456a286fda3 columnFamilyName c 2024-12-10T00:26:58,013 DEBUG [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,013 INFO [StoreOpener-8a99b4b7cca80643fea86456a286fda3-1 {}] regionserver.HStore(327): Store=8a99b4b7cca80643fea86456a286fda3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,014 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,015 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,017 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,018 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 2024-12-10T00:26:58,022 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:58,056 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T00:26:58,394 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8a99b4b7cca80643fea86456a286fda3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T00:26:58,432 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/ac8838c5eb69426a8202e0df088ae31b 
is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733790413531/Put/seqid=0 2024-12-10T00:26:58,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741858_1035 (size=50463) 2024-12-10T00:26:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741858_1035 (size=50463) 2024-12-10T00:26:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741858_1035 (size=50463) 2024-12-10T00:26:58,442 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/ac8838c5eb69426a8202e0df088ae31b 2024-12-10T00:26:58,450 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/ac8838c5eb69426a8202e0df088ae31b as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/a/ac8838c5eb69426a8202e0df088ae31b 2024-12-10T00:26:58,458 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/a/ac8838c5eb69426a8202e0df088ae31b, entries=754, sequenceid=754, filesize=49.3 K 2024-12-10T00:26:58,458 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 8a99b4b7cca80643fea86456a286fda3 in 65ms, sequenceid=754, compaction requested=false; wal=null 2024-12-10T00:26:58,482 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T00:26:58,483 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8a99b4b7cca80643fea86456a286fda3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T00:26:58,493 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/9b35d92a4bb44583b40e9e4fc14153cf is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733790413596/Put/seqid=0 2024-12-10T00:26:58,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741859_1036 (size=20072) 2024-12-10T00:26:58,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741859_1036 (size=20072) 2024-12-10T00:26:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741859_1036 (size=20072) 2024-12-10T00:26:58,508 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/9b35d92a4bb44583b40e9e4fc14153cf 2024-12-10T00:26:58,534 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/faa298c5ec334787b9e691edd446480f is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733790413638/Put/seqid=0 2024-12-10T00:26:58,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741860_1037 (size=35835) 2024-12-10T00:26:58,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741860_1037 (size=35835) 2024-12-10T00:26:58,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741860_1037 (size=35835) 2024-12-10T00:26:58,548 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/faa298c5ec334787b9e691edd446480f 2024-12-10T00:26:58,556 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/a/9b35d92a4bb44583b40e9e4fc14153cf as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/a/9b35d92a4bb44583b40e9e4fc14153cf 2024-12-10T00:26:58,564 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/a/9b35d92a4bb44583b40e9e4fc14153cf, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-10T00:26:58,566 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/faa298c5ec334787b9e691edd446480f as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/b/faa298c5ec334787b9e691edd446480f 2024-12-10T00:26:58,575 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/b/faa298c5ec334787b9e691edd446480f, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-10T00:26:58,575 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 8a99b4b7cca80643fea86456a286fda3 in 92ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-10T00:26:58,593 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T00:26:58,594 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8a99b4b7cca80643fea86456a286fda3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T00:26:58,601 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/fdb7b1ddcef14acda52d9c0ab9af567a is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733790413669/Put/seqid=0 2024-12-10T00:26:58,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741861_1038 (size=35082) 2024-12-10T00:26:58,614 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741861_1038 (size=35082) 2024-12-10T00:26:58,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741861_1038 (size=35082) 2024-12-10T00:26:58,615 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/fdb7b1ddcef14acda52d9c0ab9af567a 2024-12-10T00:26:58,641 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/f2c40ec553a64f378a7f0436603d68cd is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733790413716/Put/seqid=0 2024-12-10T00:26:58,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741862_1039 (size=20825) 2024-12-10T00:26:58,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741862_1039 (size=20825) 2024-12-10T00:26:58,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741862_1039 (size=20825) 2024-12-10T00:26:58,651 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/f2c40ec553a64f378a7f0436603d68cd 2024-12-10T00:26:58,659 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/b/fdb7b1ddcef14acda52d9c0ab9af567a as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/b/fdb7b1ddcef14acda52d9c0ab9af567a 2024-12-10T00:26:58,668 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/b/fdb7b1ddcef14acda52d9c0ab9af567a, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-10T00:26:58,670 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/f2c40ec553a64f378a7f0436603d68cd as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/c/f2c40ec553a64f378a7f0436603d68cd 2024-12-10T00:26:58,677 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/c/f2c40ec553a64f378a7f0436603d68cd, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-10T00:26:58,678 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 8a99b4b7cca80643fea86456a286fda3 in 85ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-10T00:26:58,697 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another 
family:testReplayEditsWrittenIntoWAL/1733790413770/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:58,701 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 2024-12-10T00:26:58,701 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T00:26:58,701 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8a99b4b7cca80643fea86456a286fda3 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-10T00:26:58,712 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/6caecc6e56e44f6cbed923c80302ccbe is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733790413723/Put/seqid=0 2024-12-10T00:26:58,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741863_1040 (size=50301) 2024-12-10T00:26:58,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741863_1040 (size=50301) 2024-12-10T00:26:58,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741863_1040 (size=50301) 2024-12-10T00:26:58,725 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/6caecc6e56e44f6cbed923c80302ccbe 2024-12-10T00:26:58,731 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6caecc6e56e44f6cbed923c80302ccbe 2024-12-10T00:26:58,732 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/.tmp/c/6caecc6e56e44f6cbed923c80302ccbe as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/c/6caecc6e56e44f6cbed923c80302ccbe 2024-12-10T00:26:58,739 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6caecc6e56e44f6cbed923c80302ccbe 2024-12-10T00:26:58,739 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/c/6caecc6e56e44f6cbed923c80302ccbe, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-10T00:26:58,740 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 8a99b4b7cca80643fea86456a286fda3 in 39ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-10T00:26:58,741 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/0000000000000003002 2024-12-10T00:26:58,742 DEBUG [Time-limited test {}] 
regionserver.HRegion(1048): stopping wal replay for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,742 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,743 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T00:26:58,745 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8a99b4b7cca80643fea86456a286fda3 2024-12-10T00:26:58,748 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenIntoWAL/8a99b4b7cca80643fea86456a286fda3/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-10T00:26:58,749 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8a99b4b7cca80643fea86456a286fda3; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59514210, jitterRate=-0.11316916346549988}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T00:26:58,750 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8a99b4b7cca80643fea86456a286fda3: Writing region info on filesystem at 1733790418004Initializing all the Stores at 1733790418005 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418005Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418006 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418006Cleaning up temporary data from old regions at 1733790418742 (+736 ms)Region opened successfully at 1733790418749 (+7 ms) 2024-12-10T00:26:58,813 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8a99b4b7cca80643fea86456a286fda3, disabling compactions & flushes 2024-12-10T00:26:58,814 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:58,814 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:58,814 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 
after waiting 0 ms 2024-12-10T00:26:58,814 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:58,819 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733790413406.8a99b4b7cca80643fea86456a286fda3. 2024-12-10T00:26:58,819 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8a99b4b7cca80643fea86456a286fda3: Waiting for close lock at 1733790418813Disabling compacts and flushes for region at 1733790418813Disabling writes for close at 1733790418814 (+1 ms)Writing region close event to WAL at 1733790418819 (+5 ms)Closed at 1733790418819 2024-12-10T00:26:58,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741857_1034 (size=95) 2024-12-10T00:26:58,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741857_1034 (size=95) 2024-12-10T00:26:58,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741857_1034 (size=95) 2024-12-10T00:26:58,880 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:58,880 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733790417978) 2024-12-10T00:26:58,894 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=388 (was 371) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@7436df58[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-857267088_22 at /127.0.0.1:40056 [Waiting for operation #26] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:34093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:43885 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:34093 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:33051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-857267088_22 at /127.0.0.1:47854 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-857267088_22 at /127.0.0.1:59958 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43885 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-857267088_22 at /127.0.0.1:47806 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@602518d7[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=835 (was 747) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=293 (was 293), ProcessCount=11 (was 11), AvailableMemoryMB=7911 (was 8162) 2024-12-10T00:26:58,906 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=388, OpenFileDescriptor=835, MaxFileDescriptor=1048576, SystemLoadAverage=293, ProcessCount=11, AvailableMemoryMB=7911 2024-12-10T00:26:58,929 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:58,932 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:58,933 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:26:58,936 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T00:26:58,936 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-78828489, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-78828489, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:58,936 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T00:26:58,938 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T00:26:58,939 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T00:26:58,939 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T00:26:58,939 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T00:26:58,940 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-10T00:26:58,940 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-10T00:26:58,948 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-78828489/hregion-78828489.1733790418936, exclude list is [], retry=0 2024-12-10T00:26:58,952 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:58,952 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in 
unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:58,952 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:58,954 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-78828489/hregion-78828489.1733790418936 2024-12-10T00:26:58,955 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:58,955 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 61f6df01a25b7e6ede81212b5f6083d3, NAME => 'test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:26:58,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741865_1042 (size=43) 2024-12-10T00:26:58,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741865_1042 (size=43) 2024-12-10T00:26:58,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741865_1042 (size=43) 2024-12-10T00:26:58,965 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:58,966 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,968 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName a 2024-12-10T00:26:58,968 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,968 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,969 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,970 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName b 2024-12-10T00:26:58,970 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,971 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,971 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,973 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName c 2024-12-10T00:26:58,973 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:58,974 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:58,974 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,975 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,975 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,976 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,976 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,977 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
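The region created above for test2727 (ENCODED => 61f6df01a25b7e6ede81212b5f6083d3) uses three column families a, b and c with VERSIONS => '1' and every other attribute at its default. As a hedged reconstruction of that table shape with the public hbase-client builder API (mirroring the descriptor printed in the log, not the test's internal region-creation path):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class Test2727DescriptorSketch {
  public static void main(String[] args) {
    // Families 'a', 'b', 'c' with a single version, matching the descriptor dump in the log;
    // BLOOMFILTER, BLOCKSIZE, TTL, etc. are left at their defaults.
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("test2727"));
    for (String family : new String[] { "a", "b", "c" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMaxVersions(1).build());
    }
    TableDescriptor td = builder.build();
    System.out.println(td);
  }
}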
2024-12-10T00:26:58,978 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:58,981 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:58,982 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 61f6df01a25b7e6ede81212b5f6083d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62168274, jitterRate=-0.07362052798271179}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:26:58,982 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 61f6df01a25b7e6ede81212b5f6083d3: Writing region info on filesystem at 1733790418965Initializing all the Stores at 1733790418966 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418966Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418966Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790418966Cleaning up temporary data from old regions at 1733790418976 (+10 ms)Region opened successfully at 1733790418982 (+6 ms) 2024-12-10T00:26:58,983 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 61f6df01a25b7e6ede81212b5f6083d3, disabling compactions & flushes 2024-12-10T00:26:58,983 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:58,983 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:58,983 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. after waiting 0 ms 2024-12-10T00:26:58,983 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:58,983 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 
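[editorial sketch] The open above reports FlushLargeStoresPolicy{flushSizeLowerBound=44739242}, which is the fallback the earlier FlushLargeStoresPolicy message describes: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set on test2727, the bound falls back to the region memstore flush size divided by the number of column families. A small check of that arithmetic, using the 128 MB default flush size and the three families from the log (class name illustrative):

public final class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default: 134217728
    int columnFamilies = 3;                      // families a, b, c in test2727
    long flushSizeLowerBound = memstoreFlushSize / columnFamilies;
    // Prints 44739242 (~42.7 MB), matching FlushLargeStoresPolicy{flushSizeLowerBound=44739242}.
    System.out.println(flushSizeLowerBound);
  }
}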
2024-12-10T00:26:58,983 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 61f6df01a25b7e6ede81212b5f6083d3: Waiting for close lock at 1733790418983Disabling compacts and flushes for region at 1733790418983Disabling writes for close at 1733790418983Writing region close event to WAL at 1733790418983Closed at 1733790418983 2024-12-10T00:26:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741864_1041 (size=95) 2024-12-10T00:26:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741864_1041 (size=95) 2024-12-10T00:26:58,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741864_1041 (size=95) 2024-12-10T00:26:58,989 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:58,989 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-78828489:(num 1733790418936) 2024-12-10T00:26:58,989 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:58,991 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:59,003 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, exclude list is [], retry=0 2024-12-10T00:26:59,006 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:59,006 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:59,006 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:59,009 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 2024-12-10T00:26:59,011 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:26:59,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741866_1043 (size=263359) 2024-12-10T00:26:59,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741866_1043 (size=263359) 2024-12-10T00:26:59,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741866_1043 (size=263359) 
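[editorial sketch] The WAL created above is an AsyncFSWAL with blocksize=256 MB, rollsize=128 MB and maxLogs=32, and the splitter later reports tag and value compression with valueCompressionType=GZ, which is what TestAsyncWALReplayValueCompression exercises. A hedged sketch of the configuration such a setup corresponds to; the hbase.regionserver.wal.value.* key names in particular are my assumption of the value-compression settings and should be checked against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                            // AsyncFSWALProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5F);         // rollsize = 0.5 * blocksize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                        // maxLogs=32
    // Assumed keys for WAL tag/value compression (value compression type GZ as in the log):
    conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
    conf.setBoolean("hbase.regionserver.wal.value.enablecompression", true);
    conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
    return conf;
  }
}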
2024-12-10T00:26:59,209 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, size=257.2 K (263359bytes) 2024-12-10T00:26:59,210 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 2024-12-10T00:26:59,210 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 after 0ms 2024-12-10T00:26:59,215 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:59,218 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 took 9ms 2024-12-10T00:26:59,230 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733790418992.temp 2024-12-10T00:26:59,232 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp 2024-12-10T00:26:59,280 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 so closing down 2024-12-10T00:26:59,280 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:59,280 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:59,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741867_1044 (size=263359) 2024-12-10T00:26:59,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741867_1044 (size=263359) 2024-12-10T00:26:59,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741867_1044 (size=263359) 2024-12-10T00:26:59,285 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp (wrote 3000 edits, skipped 0 edits in 39 ms) 2024-12-10T00:26:59,286 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp to hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 2024-12-10T00:26:59,287 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 67 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, size=257.2 K, length=263359, corrupted=false, cancelled=false 2024-12-10T00:26:59,287 DEBUG 
[Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, journal: Splitting hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, size=257.2 K (263359bytes) at 1733790419210Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp at 1733790419232 (+22 ms)Split 1024 edits, skipped 0 edits. at 1733790419244 (+12 ms)Split 2048 edits, skipped 0 edits. at 1733790419267 (+23 ms)Finishing writing output for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 so closing down at 1733790419280 (+13 ms)3 split writer threads finished at 1733790419281 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp (wrote 3000 edits, skipped 0 edits in 39 ms) at 1733790419285 (+4 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000000001-wal.1733790418992.temp to hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 at 1733790419287 (+2 ms)Processed 3000 edits across 1 Regions in 67 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1733790419287 2024-12-10T00:26:59,289 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790418992 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790418992 2024-12-10T00:26:59,290 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 2024-12-10T00:26:59,290 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:59,293 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:59,306 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, exclude list is [], retry=0 2024-12-10T00:26:59,310 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:59,310 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:59,310 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:59,312 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 2024-12-10T00:26:59,313 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:26:59,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741868_1045 (size=263486) 2024-12-10T00:26:59,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741868_1045 (size=263486) 2024-12-10T00:26:59,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741868_1045 (size=263486) 2024-12-10T00:26:59,474 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, size=257.3 K (263486bytes) 2024-12-10T00:26:59,474 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 2024-12-10T00:26:59,475 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 after 1ms 2024-12-10T00:26:59,478 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:59,480 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 took 6ms 2024-12-10T00:26:59,485 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733790419293.temp 2024-12-10T00:26:59,487 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp 2024-12-10T00:26:59,525 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 so closing down 2024-12-10T00:26:59,525 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:26:59,525 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:26:59,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741869_1046 (size=263486) 2024-12-10T00:26:59,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741869_1046 (size=263486) 2024-12-10T00:26:59,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741869_1046 (size=263486) 2024-12-10T00:26:59,531 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp (wrote 3000 edits, skipped 0 edits in 38 ms) 2024-12-10T00:26:59,533 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp to hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 2024-12-10T00:26:59,533 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 52 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-12-10T00:26:59,534 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, journal: Splitting hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, size=257.3 K (263486bytes) at 1733790419474Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp at 1733790419487 (+13 ms)Split 1024 edits, skipped 0 edits. at 1733790419496 (+9 ms)Split 2048 edits, skipped 0 edits. at 1733790419511 (+15 ms)Finishing writing output for hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 so closing down at 1733790419525 (+14 ms)3 split writer threads finished at 1733790419525Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp (wrote 3000 edits, skipped 0 edits in 38 ms) at 1733790419531 (+6 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003001-wal.1733790419293.temp to hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 at 1733790419533 (+2 ms)Processed 3000 edits across 1 Regions in 52 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1733790419533 2024-12-10T00:26:59,536 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419293 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790419293 2024-12-10T00:26:59,537 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 2024-12-10T00:26:59,537 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:26:59,540 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/test2727-manual,16010,1733790418928, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:59,554 DEBUG [Time-limited test {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419540, exclude list is [], retry=0 2024-12-10T00:26:59,557 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:26:59,558 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:26:59,558 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:26:59,561 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733790418928/wal.1733790419540 2024-12-10T00:26:59,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:26:59,561 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 61f6df01a25b7e6ede81212b5f6083d3, NAME => 'test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:59,562 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:59,562 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,562 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,564 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,565 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName a 2024-12-10T00:26:59,566 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:59,566 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:59,567 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,568 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName b 2024-12-10T00:26:59,568 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:59,569 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:59,569 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,571 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61f6df01a25b7e6ede81212b5f6083d3 columnFamilyName c 2024-12-10T00:26:59,571 DEBUG [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:59,572 INFO [StoreOpener-61f6df01a25b7e6ede81212b5f6083d3-1 {}] 
regionserver.HStore(327): Store=61f6df01a25b7e6ede81212b5f6083d3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:59,572 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,573 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,575 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,576 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 2024-12-10T00:26:59,580 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:59,644 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 2024-12-10T00:26:59,646 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 2024-12-10T00:26:59,649 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:26:59,687 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 2024-12-10T00:26:59,687 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 61f6df01a25b7e6ede81212b5f6083d3 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-10T00:26:59,707 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/a/4fffaa61f0b94e56b7cc6be5889abf25 is 41, key is test2727/a:100/1733790419317/Put/seqid=0 2024-12-10T00:26:59,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741871_1048 (size=84227) 2024-12-10T00:26:59,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741871_1048 (size=84227) 2024-12-10T00:26:59,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741871_1048 (size=84227) 2024-12-10T00:26:59,715 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/a/4fffaa61f0b94e56b7cc6be5889abf25 2024-12-10T00:26:59,742 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/b/498b35d005034a8997d2c660b5c375dc is 41, key is test2727/b:100/1733790419359/Put/seqid=0 2024-12-10T00:26:59,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741872_1049 (size=84609) 2024-12-10T00:26:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741872_1049 (size=84609) 2024-12-10T00:26:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741872_1049 (size=84609) 2024-12-10T00:26:59,748 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/b/498b35d005034a8997d2c660b5c375dc 2024-12-10T00:26:59,773 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/c/e1f17137124a4f8da2d48896175ec6db is 41, key is test2727/c:100/1733790419403/Put/seqid=0 2024-12-10T00:26:59,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741873_1050 (size=84609) 2024-12-10T00:26:59,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741873_1050 (size=84609) 2024-12-10T00:26:59,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741873_1050 (size=84609) 2024-12-10T00:26:59,783 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/c/e1f17137124a4f8da2d48896175ec6db 2024-12-10T00:26:59,790 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/a/4fffaa61f0b94e56b7cc6be5889abf25 as hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/a/4fffaa61f0b94e56b7cc6be5889abf25 2024-12-10T00:26:59,798 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/a/4fffaa61f0b94e56b7cc6be5889abf25, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-10T00:26:59,800 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/b/498b35d005034a8997d2c660b5c375dc as hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/b/498b35d005034a8997d2c660b5c375dc 2024-12-10T00:26:59,808 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/b/498b35d005034a8997d2c660b5c375dc, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-10T00:26:59,809 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/.tmp/c/e1f17137124a4f8da2d48896175ec6db as hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/c/e1f17137124a4f8da2d48896175ec6db 2024-12-10T00:26:59,817 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/c/e1f17137124a4f8da2d48896175ec6db, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-10T00:26:59,817 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 61f6df01a25b7e6ede81212b5f6083d3 in 130ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-10T00:26:59,818 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000003000 2024-12-10T00:26:59,819 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/0000000000000006000 2024-12-10T00:26:59,820 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,820 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,821 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
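[editorial sketch] After replaying both recovered-edits files (3000 edits each, sequence ids 1-3000 and 3001-6000), the region flushes dataSize ~215.51 KB / 220680 bytes across the three families, and each family flusher reports 71.84 KB. Those figures are consistent with an even split of the 6000 replayed cells; a quick check of the accounting, with the numbers copied from the log (class name illustrative):

public final class ReplayFlushAccounting {
  public static void main(String[] args) {
    long totalDataSizeBytes = 220_680L;  // "Finished flush of dataSize ~215.51 KB/220680"
    int families = 3;                    // a, b, c
    int cells = 6_000;                   // 2 x 3000 replayed edits
    long perFamilyBytes = totalDataSizeBytes / families;        // 73560 bytes
    double perFamilyKb = perFamilyBytes / 1024.0;               // ~71.84 KB, as logged per family
    double bytesPerCell = (double) totalDataSizeBytes / cells;  // ~36.8 bytes per small test cell
    System.out.printf("%d bytes/family (%.2f KB), %.1f bytes/cell%n",
        perFamilyBytes, perFamilyKb, bytesPerCell);
  }
}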
2024-12-10T00:26:59,823 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 61f6df01a25b7e6ede81212b5f6083d3 2024-12-10T00:26:59,826 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/test2727/61f6df01a25b7e6ede81212b5f6083d3/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-10T00:26:59,827 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 61f6df01a25b7e6ede81212b5f6083d3; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65428464, jitterRate=-0.0250399112701416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:26:59,828 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 61f6df01a25b7e6ede81212b5f6083d3: Writing region info on filesystem at 1733790419562Initializing all the Stores at 1733790419563 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790419563Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790419564 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790419564Obtaining lock to block concurrent updates at 1733790419687 (+123 ms)Preparing flush snapshotting stores in 61f6df01a25b7e6ede81212b5f6083d3 at 1733790419687Finished memstore snapshotting test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733790419688 (+1 ms)Flushing stores of test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 
at 1733790419688Flushing 61f6df01a25b7e6ede81212b5f6083d3/a: creating writer at 1733790419688Flushing 61f6df01a25b7e6ede81212b5f6083d3/a: appending metadata at 1733790419706 (+18 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/a: closing flushed file at 1733790419707 (+1 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/b: creating writer at 1733790419721 (+14 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/b: appending metadata at 1733790419740 (+19 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/b: closing flushed file at 1733790419740Flushing 61f6df01a25b7e6ede81212b5f6083d3/c: creating writer at 1733790419755 (+15 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/c: appending metadata at 1733790419772 (+17 ms)Flushing 61f6df01a25b7e6ede81212b5f6083d3/c: closing flushed file at 1733790419772Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e8f0d66: reopening flushed file at 1733790419789 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44d0d99b: reopening flushed file at 1733790419798 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735791e4: reopening flushed file at 1733790419808 (+10 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 61f6df01a25b7e6ede81212b5f6083d3 in 130ms, sequenceid=6000, compaction requested=false; wal=null at 1733790419817 (+9 ms)Cleaning up temporary data from old regions at 1733790419820 (+3 ms)Region opened successfully at 1733790419828 (+8 ms) 2024-12-10T00:26:59,829 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-10T00:26:59,829 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 61f6df01a25b7e6ede81212b5f6083d3, disabling compactions & flushes 2024-12-10T00:26:59,830 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:59,830 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:59,830 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. after waiting 0 ms 2024-12-10T00:26:59,830 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 2024-12-10T00:26:59,831 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733790418930.61f6df01a25b7e6ede81212b5f6083d3. 
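[editorial sketch] The second open replays up to maxSequenceIdInLog=6000, writes recovered.edits/6000.seqid, and comes up with next sequenceid=6001, which is what the test then observes via region.getOpenSeqNum(). A minimal sketch of that invariant as plain arithmetic, not the test's actual assertion code:

public final class OpenSeqNumSketch {
  public static void main(String[] args) {
    long maxSeqIdFirstWal = 3_000L;   // recovered.edits/0000000000000003000
    long maxSeqIdSecondWal = 6_000L;  // recovered.edits/0000000000000006000
    long maxSeqId = Math.max(maxSeqIdFirstWal, maxSeqIdSecondWal);
    long nextSequenceId = maxSeqId + 1;  // 6001, matching "next sequenceid=6001" / getOpenSeqNum()
    System.out.println(nextSequenceId);
  }
}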
2024-12-10T00:26:59,831 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 61f6df01a25b7e6ede81212b5f6083d3: Waiting for close lock at 1733790419829Disabling compacts and flushes for region at 1733790419829Disabling writes for close at 1733790419830 (+1 ms)Writing region close event to WAL at 1733790419831 (+1 ms)Closed at 1733790419831 2024-12-10T00:26:59,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741870_1047 (size=95) 2024-12-10T00:26:59,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741870_1047 (size=95) 2024-12-10T00:26:59,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741870_1047 (size=95) 2024-12-10T00:26:59,839 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:26:59,840 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733790419540) 2024-12-10T00:26:59,855 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=393 (was 388) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:60122 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:40200 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47854 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47806 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=899 (was 835) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=293 (was 293), ProcessCount=11 (was 11), AvailableMemoryMB=7689 (was 7911) 2024-12-10T00:26:59,866 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=393, OpenFileDescriptor=899, MaxFileDescriptor=1048576, SystemLoadAverage=293, ProcessCount=11, AvailableMemoryMB=7687 2024-12-10T00:26:59,880 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:26:59,887 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:26:59,888 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733790419888 2024-12-10T00:26:59,896 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 2024-12-10T00:26:59,898 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:26:59,900 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c24ccc3c7e90bcaef8e7f9f2148ddae, NAME => 'testSequentialEditLogSeqNum,,1733790419881.2c24ccc3c7e90bcaef8e7f9f2148ddae.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:26:59,900 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733790419881.2c24ccc3c7e90bcaef8e7f9f2148ddae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:26:59,900 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,900 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,901 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): 
hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae doesn't exist for region: 2c24ccc3c7e90bcaef8e7f9f2148ddae on table testSequentialEditLogSeqNum 2024-12-10T00:26:59,902 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 2c24ccc3c7e90bcaef8e7f9f2148ddae on table testSequentialEditLogSeqNum 2024-12-10T00:26:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741875_1052 (size=62) 2024-12-10T00:26:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741875_1052 (size=62) 2024-12-10T00:26:59,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741875_1052 (size=62) 2024-12-10T00:26:59,914 INFO [StoreOpener-2c24ccc3c7e90bcaef8e7f9f2148ddae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,915 INFO [StoreOpener-2c24ccc3c7e90bcaef8e7f9f2148ddae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c24ccc3c7e90bcaef8e7f9f2148ddae columnFamilyName a 2024-12-10T00:26:59,915 DEBUG [StoreOpener-2c24ccc3c7e90bcaef8e7f9f2148ddae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:26:59,916 INFO [StoreOpener-2c24ccc3c7e90bcaef8e7f9f2148ddae-1 {}] regionserver.HStore(327): Store=2c24ccc3c7e90bcaef8e7f9f2148ddae/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:26:59,916 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,917 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,917 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,917 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,917 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,919 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing 
seq id for 2c24ccc3c7e90bcaef8e7f9f2148ddae 2024-12-10T00:26:59,921 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:26:59,922 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c24ccc3c7e90bcaef8e7f9f2148ddae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62864315, jitterRate=-0.06324870884418488}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:26:59,923 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c24ccc3c7e90bcaef8e7f9f2148ddae: Writing region info on filesystem at 1733790419901Initializing all the Stores at 1733790419913 (+12 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790419913Cleaning up temporary data from old regions at 1733790419917 (+4 ms)Region opened successfully at 1733790419922 (+5 ms) 2024-12-10T00:26:59,936 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2c24ccc3c7e90bcaef8e7f9f2148ddae 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-10T00:26:59,962 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/.tmp/a/839896e77fda45c39a4c1d2abd8e0f25 is 81, key is testSequentialEditLogSeqNum/a:x0/1733790419923/Put/seqid=0 2024-12-10T00:26:59,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741876_1053 (size=5833) 2024-12-10T00:26:59,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741876_1053 (size=5833) 2024-12-10T00:26:59,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741876_1053 (size=5833) 2024-12-10T00:26:59,970 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/.tmp/a/839896e77fda45c39a4c1d2abd8e0f25 2024-12-10T00:26:59,977 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/.tmp/a/839896e77fda45c39a4c1d2abd8e0f25 as hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/a/839896e77fda45c39a4c1d2abd8e0f25 2024-12-10T00:26:59,984 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/a/839896e77fda45c39a4c1d2abd8e0f25, entries=10, sequenceid=13, filesize=5.7 K 2024-12-10T00:26:59,986 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 
2c24ccc3c7e90bcaef8e7f9f2148ddae in 50ms, sequenceid=13, compaction requested=false 2024-12-10T00:26:59,986 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2c24ccc3c7e90bcaef8e7f9f2148ddae: 2024-12-10T00:26:59,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T00:26:59,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T00:26:59,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T00:26:59,993 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T00:26:59,993 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T00:26:59,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741874_1051 (size=1843) 2024-12-10T00:26:59,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741874_1051 (size=1843) 2024-12-10T00:26:59,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741874_1051 (size=1843) 2024-12-10T00:27:00,023 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888, size=1.8 K (1843bytes) 2024-12-10T00:27:00,023 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 2024-12-10T00:27:00,024 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 after 0ms 2024-12-10T00:27:00,027 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:00,027 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 took 5ms 2024-12-10T00:27:00,031 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 so closing down 2024-12-10T00:27:00,031 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:00,032 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733790419888.temp 2024-12-10T00:27:00,034 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp 2024-12-10T00:27:00,034 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:00,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741877_1054 (size=1477) 2024-12-10T00:27:00,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34693 is added to blk_1073741877_1054 (size=1477) 2024-12-10T00:27:00,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741877_1054 (size=1477) 2024-12-10T00:27:00,044 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:00,046 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp to hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000020 2024-12-10T00:27:00,046 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 18 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888, size=1.8 K, length=1843, corrupted=false, cancelled=false 2024-12-10T00:27:00,046 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888, journal: Splitting hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888, size=1.8 K (1843bytes) at 1733790420023Finishing writing output for hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888 so closing down at 1733790420031 (+8 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp at 1733790420034 (+3 ms)3 split writer threads finished at 1733790420034Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733790420044 (+10 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000003-wal.1733790419888.temp to hdfs://localhost:34093/hbase/data/default/testSequentialEditLogSeqNum/2c24ccc3c7e90bcaef8e7f9f2148ddae/recovered.edits/0000000000000000020 at 1733790420046 (+2 ms)Processed 17 edits across 1 Regions in 18 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733790419879/wal.1733790419888, size=1.8 K, length=1843, corrupted=false, cancelled=false at 1733790420046 2024-12-10T00:27:00,061 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=398 (was 393) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:60122 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47854 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:47806 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=937 (was 899) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=318 (was 293) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7672 (was 7687) 2024-12-10T00:27:00,073 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=398, OpenFileDescriptor=937, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=7671 2024-12-10T00:27:00,088 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:00,091 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:00,092 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:00,095 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-49363443, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-49363443, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:00,135 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-49363443/hregion-49363443.1733790420096, exclude list is [], retry=0 2024-12-10T00:27:00,139 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:00,153 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:00,157 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:00,193 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL 
/hbase/WALs/hregion-49363443/hregion-49363443.1733790420096 2024-12-10T00:27:00,194 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:00,194 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d23abe201a736bbcb48d0d1bb05b02c0, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:00,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741879_1056 (size=70) 2024-12-10T00:27:00,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741879_1056 (size=70) 2024-12-10T00:27:00,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741879_1056 (size=70) 2024-12-10T00:27:00,242 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:00,243 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,245 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName 
a 2024-12-10T00:27:00,245 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,245 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,246 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,248 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName b 2024-12-10T00:27:00,248 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,248 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,249 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,250 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName c 2024-12-10T00:27:00,250 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,251 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,251 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,251 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,252 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,253 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,253 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,254 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:00,255 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,257 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:00,258 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d23abe201a736bbcb48d0d1bb05b02c0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59462588, jitterRate=-0.11393839120864868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d23abe201a736bbcb48d0d1bb05b02c0: Writing region info on filesystem at 1733790420242Initializing all the Stores at 1733790420243 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420243Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420243Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420243Cleaning up temporary data from old regions at 1733790420253 (+10 ms)Region opened successfully at 1733790420258 (+5 ms) 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d23abe201a736bbcb48d0d1bb05b02c0, disabling compactions & flushes 2024-12-10T00:27:00,259 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. after waiting 0 ms 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:00,259 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:00,259 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d23abe201a736bbcb48d0d1bb05b02c0: Waiting for close lock at 1733790420259Disabling compacts and flushes for region at 1733790420259Disabling writes for close at 1733790420259Writing region close event to WAL at 1733790420259Closed at 1733790420259 2024-12-10T00:27:00,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741878_1055 (size=95) 2024-12-10T00:27:00,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741878_1055 (size=95) 2024-12-10T00:27:00,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741878_1055 (size=95) 2024-12-10T00:27:00,265 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:00,265 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-49363443:(num 1733790420096) 2024-12-10T00:27:00,266 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:00,268 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:00,285 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, exclude list is [], retry=0 2024-12-10T00:27:00,288 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:00,289 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake 
in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:00,289 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:00,291 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 2024-12-10T00:27:00,291 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:00,291 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d23abe201a736bbcb48d0d1bb05b02c0, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:00,292 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:00,292 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,292 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,293 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,294 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName a 2024-12-10T00:27:00,294 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,295 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,295 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,296 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName b 2024-12-10T00:27:00,296 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,296 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,296 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,297 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName c 2024-12-10T00:27:00,297 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:00,298 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:00,298 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,298 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,300 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,301 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,301 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,301 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:00,303 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:00,303 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d23abe201a736bbcb48d0d1bb05b02c0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62722685, jitterRate=-0.06535916030406952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:00,304 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d23abe201a736bbcb48d0d1bb05b02c0: Writing region info on filesystem at 1733790420292Initializing all the Stores at 1733790420293 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420293Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420293Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790420293Cleaning up temporary data from old regions at 1733790420301 (+8 ms)Region opened successfully at 1733790420304 (+3 ms) 2024-12-10T00:27:00,308 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733790420307/Put/seqid=0 2024-12-10T00:27:00,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741881_1058 (size=4826) 2024-12-10T00:27:00,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741881_1058 (size=4826) 2024-12-10T00:27:00,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34693 is added to blk_1073741881_1058 (size=4826) 2024-12-10T00:27:00,316 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34093/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in d23abe201a736bbcb48d0d1bb05b02c0/a 2024-12-10T00:27:00,326 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-10T00:27:00,327 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T00:27:00,327 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d23abe201a736bbcb48d0d1bb05b02c0: 2024-12-10T00:27:00,329 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/df02a1795f474f54ae20e160090e7679_SeqId_3_ 2024-12-10T00:27:00,330 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34093/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into d23abe201a736bbcb48d0d1bb05b02c0/a as hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/df02a1795f474f54ae20e160090e7679_SeqId_3_ - updating store file list. 2024-12-10T00:27:00,337 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for df02a1795f474f54ae20e160090e7679_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:00,338 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/df02a1795f474f54ae20e160090e7679_SeqId_3_ into d23abe201a736bbcb48d0d1bb05b02c0/a 2024-12-10T00:27:00,338 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34093/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into d23abe201a736bbcb48d0d1bb05b02c0/a (new location: hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/df02a1795f474f54ae20e160090e7679_SeqId_3_) 2024-12-10T00:27:00,377 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, size=0 (0bytes) 2024-12-10T00:27:00,377 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 might be still open, length is 0 2024-12-10T00:27:00,378 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 2024-12-10T00:27:00,378 WARN [IPC Server handler 1 on default port 34093 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 has not been closed. Lease recovery is in progress. 
RecoveryId = 1059 for block blk_1073741880_1057 2024-12-10T00:27:00,378 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 after 0ms 2024-12-10T00:27:02,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:45692 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45692 dst: /127.0.0.1:35811 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35811 remote=/127.0.0.1:45692]. Total timeout mills is 60000, 57706 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:02,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:42490 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:37237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42490 dst: /127.0.0.1:37237 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:02,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:36356 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36356 dst: /127.0.0.1:34693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
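[Editor's note] The DataXceiver errors above are the expected side effect of force-closing a WAL whose write pipeline was still open: before splitting wal.1733790420268, the splitter asks the NameNode to recover the file's lease, which tears down the in-flight block (blk_1073741880_1057) on the three datanodes and finalizes it under a new generation stamp; the log shows attempt=0 failing immediately and attempt=1 succeeding about four seconds later. As a rough sketch only (not the actual RecoverLeaseFSUtils code; the pause and retry budget below are illustrative assumptions), lease recovery over the public HDFS client API looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /**
   * Repeatedly asks the NameNode to recover the lease on a file that a writer
   * left open, waiting until the file is reported closed. Simplified sketch;
   * pause/backoff and attempt count are illustrative, not HBase's real values.
   */
  static boolean recoverLease(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long pauseMs = 1000L;   // assumed pause between attempts
    int maxAttempts = 10;   // assumed retry budget
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease() returns true once the file is closed and safe to read.
      if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);                  // attempt=0 often fails, as in this log
      pauseMs = Math.min(pauseMs * 2, 16_000L);
    }
    return false;
  }
}

In this run the first call returns false because the last block is still under construction; once the pipeline is torn down (the errors above) and the block is re-registered as blk_1073741880_1059, the next poll sees the file closed and the split proceeds, as the following records show.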
2024-12-10T00:27:02,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741880_1059 (size=474) 2024-12-10T00:27:02,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741880_1059 (size=474) 2024-12-10T00:27:04,379 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 after 4001ms 2024-12-10T00:27:04,383 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:04,384 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 took 4007ms 2024-12-10T00:27:04,386 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268; continuing. 2024-12-10T00:27:04,386 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 so closing down 2024-12-10T00:27:04,386 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:04,389 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733790420268.temp 2024-12-10T00:27:04,391 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp 2024-12-10T00:27:04,391 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:04,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741882_1060 (size=259) 2024-12-10T00:27:04,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741882_1060 (size=259) 2024-12-10T00:27:04,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741882_1060 (size=259) 2024-12-10T00:27:04,399 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:04,401 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp to 
hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 2024-12-10T00:27:04,401 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 17 ms; skipped=1; WAL=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T00:27:04,401 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, journal: Splitting hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, size=0 (0bytes) at 1733790420377Finishing writing output for hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 so closing down at 1733790424386 (+4009 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp at 1733790424391 (+5 ms)3 split writer threads finished at 1733790424391Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733790424400 (+9 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005-wal.1733790420268.temp to hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 at 1733790424401 (+1 ms)Processed 2 edits across 1 Regions in 17 ms; skipped=1; WAL=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268, size=0, length=0, corrupted=false, cancelled=false at 1733790424401 2024-12-10T00:27:04,403 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790420268 2024-12-10T00:27:04,404 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 2024-12-10T00:27:04,404 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:04,406 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:04,417 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790424406, exclude list is [], retry=0 2024-12-10T00:27:04,419 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:04,420 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:04,420 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:04,422 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790424406 2024-12-10T00:27:04,422 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:04,422 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d23abe201a736bbcb48d0d1bb05b02c0, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:04,422 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:04,423 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,423 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,424 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,425 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName a 2024-12-10T00:27:04,425 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,431 DEBUG [StoreFileOpener-d23abe201a736bbcb48d0d1bb05b02c0-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for df02a1795f474f54ae20e160090e7679_SeqId_3_: NONE, but ROW specified in 
column family configuration 2024-12-10T00:27:04,431 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/df02a1795f474f54ae20e160090e7679_SeqId_3_ 2024-12-10T00:27:04,431 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:04,431 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,432 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName b 2024-12-10T00:27:04,432 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,433 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:04,433 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,434 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d23abe201a736bbcb48d0d1bb05b02c0 columnFamilyName c 2024-12-10T00:27:04,434 DEBUG [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,434 INFO [StoreOpener-d23abe201a736bbcb48d0d1bb05b02c0-1 {}] regionserver.HStore(327): Store=d23abe201a736bbcb48d0d1bb05b02c0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:04,434 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,435 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,436 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,437 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 2024-12-10T00:27:04,439 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:04,440 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 2024-12-10T00:27:04,440 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d23abe201a736bbcb48d0d1bb05b02c0 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-10T00:27:04,454 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/.tmp/a/ca79678865544da982bde0f8c5ca859b is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733790420343/Put/seqid=0 2024-12-10T00:27:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741884_1062 (size=5149) 2024-12-10T00:27:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741884_1062 (size=5149) 2024-12-10T00:27:04,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741884_1062 (size=5149) 2024-12-10T00:27:04,460 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/.tmp/a/ca79678865544da982bde0f8c5ca859b 2024-12-10T00:27:04,466 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/.tmp/a/ca79678865544da982bde0f8c5ca859b as hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/ca79678865544da982bde0f8c5ca859b 2024-12-10T00:27:04,473 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/a/ca79678865544da982bde0f8c5ca859b, entries=1, sequenceid=5, filesize=5.0 K 2024-12-10T00:27:04,474 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for d23abe201a736bbcb48d0d1bb05b02c0 in 34ms, sequenceid=5, compaction requested=false; wal=null 2024-12-10T00:27:04,474 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/0000000000000000005 2024-12-10T00:27:04,476 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,476 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,477 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:04,478 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d23abe201a736bbcb48d0d1bb05b02c0 2024-12-10T00:27:04,480 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/d23abe201a736bbcb48d0d1bb05b02c0/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-10T00:27:04,481 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d23abe201a736bbcb48d0d1bb05b02c0; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73494341, jitterRate=0.09515102207660675}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:04,482 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d23abe201a736bbcb48d0d1bb05b02c0: Writing region info on filesystem at 1733790424423Initializing all the Stores at 1733790424424 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790424424Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790424424Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790424424Obtaining lock to block concurrent updates at 1733790424440 (+16 ms)Preparing flush snapshotting stores in d23abe201a736bbcb48d0d1bb05b02c0 at 1733790424440Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733790424440Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. at 1733790424440Flushing d23abe201a736bbcb48d0d1bb05b02c0/a: creating writer at 1733790424440Flushing d23abe201a736bbcb48d0d1bb05b02c0/a: appending metadata at 1733790424453 (+13 ms)Flushing d23abe201a736bbcb48d0d1bb05b02c0/a: closing flushed file at 1733790424453Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@485d2f01: reopening flushed file at 1733790424465 (+12 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for d23abe201a736bbcb48d0d1bb05b02c0 in 34ms, sequenceid=5, compaction requested=false; wal=null at 1733790424474 (+9 ms)Cleaning up temporary data from old regions at 1733790424476 (+2 ms)Region opened successfully at 1733790424481 (+5 ms) 2024-12-10T00:27:04,486 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d23abe201a736bbcb48d0d1bb05b02c0, disabling compactions & flushes 2024-12-10T00:27:04,486 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:04,486 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:04,486 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. after waiting 0 ms 2024-12-10T00:27:04,486 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 2024-12-10T00:27:04,487 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733790420089.d23abe201a736bbcb48d0d1bb05b02c0. 
2024-12-10T00:27:04,487 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d23abe201a736bbcb48d0d1bb05b02c0: Waiting for close lock at 1733790424485Disabling compacts and flushes for region at 1733790424485Disabling writes for close at 1733790424486 (+1 ms)Writing region close event to WAL at 1733790424487 (+1 ms)Closed at 1733790424487 2024-12-10T00:27:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741883_1061 (size=95) 2024-12-10T00:27:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741883_1061 (size=95) 2024-12-10T00:27:04,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741883_1061 (size=95) 2024-12-10T00:27:04,491 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:04,491 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733790424406) 2024-12-10T00:27:04,507 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=400 (was 398) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:34093 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:34093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1495830774_22 at /127.0.0.1:42530 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1495830774_22 at /127.0.0.1:36402 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1495830774_22 at /127.0.0.1:45722 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=995 (was 937) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=318 (was 318), ProcessCount=11 (was 11), AvailableMemoryMB=7460 (was 7671) 2024-12-10T00:27:04,520 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=400, OpenFileDescriptor=995, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=7458 2024-12-10T00:27:04,533 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:04,537 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T00:27:04,542 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a67c4886b4f7,41433,1733790408551 2024-12-10T00:27:04,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@239a9420 2024-12-10T00:27:04,546 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T00:27:04,548 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T00:27:04,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T00:27:04,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-10T00:27:04,559 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T00:27:04,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-10T00:27:04,561 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T00:27:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T00:27:04,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741885_1063 (size=694) 2024-12-10T00:27:04,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741885_1063 (size=694) 2024-12-10T00:27:04,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741885_1063 (size=694) 2024-12-10T00:27:04,577 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507 2024-12-10T00:27:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741886_1064 (size=77) 2024-12-10T00:27:04,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741886_1064 (size=77) 2024-12-10T00:27:04,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741886_1064 (size=77) 2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:04,586 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,586 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,586 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790424586Disabling compacts and flushes for region at 1733790424586Disabling writes for close at 1733790424586Writing region close event to WAL at 1733790424586Closed at 1733790424586 2024-12-10T00:27:04,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T00:27:04,592 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733790424588"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733790424588"}]},"ts":"1733790424588"} 2024-12-10T00:27:04,595 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
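The master entries above correspond to a plain client-side create-table request: two column families (cf1, cf2), one version each, a single region replica, defaults everywhere else. A hedged sketch of the equivalent call with the public Admin/TableDescriptorBuilder API; table and family names come from the log, while the connection configuration is assumed to point at the test cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
          // Two families, one version each -- matching the cf1/cf2 attributes
          // the master prints in the create request above.
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1"))
                  .setMaxVersions(1).build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf2"))
                  .setMaxVersions(1).build());
          // createTable() blocks until the CreateTableProcedure (pid=4 above) has
          // finished, i.e. the single region has been created and assigned.
          admin.createTable(table.build());
        }
      }
    }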
2024-12-10T00:27:04,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T00:27:04,599 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733790424597"}]},"ts":"1733790424597"} 2024-12-10T00:27:04,602 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-10T00:27:04,603 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a67c4886b4f7=0} racks are {/default-rack=0} 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T00:27:04,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T00:27:04,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T00:27:04,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T00:27:04,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T00:27:04,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN}] 2024-12-10T00:27:04,607 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN 2024-12-10T00:27:04,608 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN; state=OFFLINE, location=a67c4886b4f7,36039,1733790409906; forceNewPlan=false, retain=false 2024-12-10T00:27:04,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T00:27:04,761 INFO [a67c4886b4f7:41433 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
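The entries above record the region row (info:regioninfo, info:state) and the table state being written to hbase:meta, followed by the balancer picking a server and the ASSIGN subprocedure being queued. Once the open completes (next entries), the row also carries the hosting server. A small sketch of reading that row back with the standard client API; the row key is copied verbatim from the Put logged above, and the info:state / info:server qualifiers are the ones hbase:meta conventionally uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRegionStateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
        byte[] info = Bytes.toBytes("info");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Region row key copied verbatim from the Put logged above.
          Get get = new Get(Bytes.toBytes(
              "testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549."
                  + "0273fa7e527747c1d22b1fb928589bc9."));
          Result r = meta.get(get);
          System.out.println("state="
              + Bytes.toString(r.getValue(info, Bytes.toBytes("state")))
              + " server="
              + Bytes.toString(r.getValue(info, Bytes.toBytes("server"))));
        }
      }
    }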
2024-12-10T00:27:04,762 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPENING, regionLocation=a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:04,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN because future has completed 2024-12-10T00:27:04,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906}] 2024-12-10T00:27:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T00:27:04,919 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T00:27:04,923 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T00:27:04,931 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,932 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:04,932 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,933 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:04,933 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,933 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,934 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,936 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:04,936 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,936 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:04,936 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,938 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:04,938 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:04,938 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:04,938 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,939 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,940 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,940 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,940 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,941 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T00:27:04,942 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,944 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:04,945 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0273fa7e527747c1d22b1fb928589bc9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66530176, jitterRate=-0.008623123168945312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T00:27:04,945 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:04,946 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0273fa7e527747c1d22b1fb928589bc9: Running coprocessor pre-open hook at 1733790424933Writing region info on filesystem at 1733790424933Initializing all the Stores at 1733790424934 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790424934Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790424934Cleaning up temporary data from old regions at 1733790424940 (+6 ms)Running coprocessor post-open hooks at 1733790424945 (+5 ms)Region opened successfully at 1733790424946 (+1 ms) 2024-12-10T00:27:04,947 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., pid=6, masterSystemTime=1733790424919 2024-12-10T00:27:04,950 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,950 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:04,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPEN, openSeqNum=2, regionLocation=a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:04,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 because future has completed 2024-12-10T00:27:04,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T00:27:04,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 in 192 msec 2024-12-10T00:27:04,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T00:27:04,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN in 355 msec 2024-12-10T00:27:04,966 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T00:27:04,966 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733790424966"}]},"ts":"1733790424966"} 2024-12-10T00:27:04,968 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-10T00:27:04,969 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T00:27:04,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 417 msec 2024-12-10T00:27:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T00:27:05,197 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-10T00:27:05,197 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-12-10T00:27:05,198 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T00:27:05,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-10T00:27:05,206 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T00:27:05,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-10T00:27:05,218 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=2] 2024-12-10T00:27:05,219 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T00:27:05,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53462, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T00:27:05,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=0273fa7e527747c1d22b1fb928589bc9, source=a67c4886b4f7,36039,1733790409906, destination=a67c4886b4f7,39473,1733790409727, warming up region on a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:05,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T00:27:05,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=0273fa7e527747c1d22b1fb928589bc9, source=a67c4886b4f7,36039,1733790409906, destination=a67c4886b4f7,39473,1733790409727, running balancer 2024-12-10T00:27:05,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34561, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T00:27:05,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE 2024-12-10T00:27:05,240 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE 2024-12-10T00:27:05,242 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=CLOSING, regionLocation=a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:05,244 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(7855): Warmup {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:05,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:05,245 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE because future has completed 2024-12-10T00:27:05,246 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:05,246 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:05,246 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T00:27:05,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906}] 2024-12-10T00:27:05,246 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:05,247 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,248 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:05,248 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:05,248 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:05,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
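The move recorded above (the master warming up the region on the destination server before closing it on the source) is driven by a client-side admin request. A minimal sketch of issuing that kind of move through the public HBase Admin API follows; the table name and server name are taken from the log purely as illustrative values, and the cluster connection setup is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
          // The test table has a single region; its encoded name appears in the log
          // as 0273fa7e527747c1d22b1fb928589bc9.
          RegionInfo region = admin.getRegions(table).get(0);
          // Destination in "host,port,startcode" form, copied from the log only as
          // an example value.
          ServerName destination = ServerName.valueOf("a67c4886b4f7,39473,1733790409727");
          // Asks the master to close the region on its current server and reopen it
          // on the destination, producing a REOPEN/MOVE procedure chain like the one
          // logged above.
          admin.move(Bytes.toBytes(region.getEncodedName()), destination);
        }
      }
    }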
2024-12-10T00:27:05,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790425249Disabling compacts and flushes for region at 1733790425249Disabling writes for close at 1733790425249Writing region close event to WAL at 1733790425249Closed at 1733790425249 2024-12-10T00:27:05,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-10T00:27:05,404 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,405 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-10T00:27:05,405 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:05,405 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,405 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,405 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:05,405 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
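The close that has just begun on the source server flushes a single outstanding cell, r1/cf1:q, in the records that follow. That cell was written by the test before the move was requested; a put of the same shape through the standard client API might look like this sketch, where the value is only a placeholder.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutRowSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table t = conn.getTable(
                 TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          Put put = new Put(Bytes.toBytes("r1"));
          // Column family cf1, qualifier q, matching the key flushed below; the
          // value "v1" is a placeholder, not taken from the log.
          put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
          t.put(put);
        }
      }
    }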
2024-12-10T00:27:05,405 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 0273fa7e527747c1d22b1fb928589bc9 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-10T00:27:05,422 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/7505668472174e6d9e3df22c4dcd63c4 is 35, key is r1/cf1:q/1733790425222/Put/seqid=0 2024-12-10T00:27:05,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741887_1065 (size=4783) 2024-12-10T00:27:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741887_1065 (size=4783) 2024-12-10T00:27:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741887_1065 (size=4783) 2024-12-10T00:27:05,430 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/7505668472174e6d9e3df22c4dcd63c4 2024-12-10T00:27:05,437 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/7505668472174e6d9e3df22c4dcd63c4 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4 2024-12-10T00:27:05,444 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T00:27:05,445 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 0273fa7e527747c1d22b1fb928589bc9 in 40ms, sequenceid=5, compaction requested=false 2024-12-10T00:27:05,445 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-10T00:27:05,451 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-10T00:27:05,453 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,453 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790425405Running coprocessor pre-close hooks at 1733790425405Disabling compacts and flushes for region at 1733790425405Disabling writes for close at 1733790425405Obtaining lock to block concurrent updates at 1733790425405Preparing flush snapshotting stores in 0273fa7e527747c1d22b1fb928589bc9 at 1733790425405Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733790425406 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. at 1733790425406Flushing 0273fa7e527747c1d22b1fb928589bc9/cf1: creating writer at 1733790425406Flushing 0273fa7e527747c1d22b1fb928589bc9/cf1: appending metadata at 1733790425422 (+16 ms)Flushing 0273fa7e527747c1d22b1fb928589bc9/cf1: closing flushed file at 1733790425422Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5886190b: reopening flushed file at 1733790425437 (+15 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 0273fa7e527747c1d22b1fb928589bc9 in 40ms, sequenceid=5, compaction requested=false at 1733790425445 (+8 ms)Writing region close event to WAL at 1733790425447 (+2 ms)Running coprocessor post-close hooks at 1733790425452 (+5 ms)Closed at 1733790425453 (+1 ms) 2024-12-10T00:27:05,454 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 0273fa7e527747c1d22b1fb928589bc9 move to a67c4886b4f7,39473,1733790409727 record at close sequenceid=5 2024-12-10T00:27:05,457 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=CLOSED 2024-12-10T00:27:05,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 because future has completed 2024-12-10T00:27:05,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T00:27:05,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 in 216 msec 2024-12-10T00:27:05,466 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE; state=CLOSED, 
location=a67c4886b4f7,39473,1733790409727; forceNewPlan=false, retain=false 2024-12-10T00:27:05,617 INFO [a67c4886b4f7:41433 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T00:27:05,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPENING, regionLocation=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:05,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE because future has completed 2024-12-10T00:27:05,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727}] 2024-12-10T00:27:05,786 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,787 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:05,787 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,788 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:05,788 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,788 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,791 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,793 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:05,794 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:05,802 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4 2024-12-10T00:27:05,802 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:05,802 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,804 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:05,804 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:05,804 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:05,804 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,805 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,807 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,807 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,807 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,808 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T00:27:05,810 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,811 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 0273fa7e527747c1d22b1fb928589bc9; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68203468, jitterRate=0.01631087064743042}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T00:27:05,811 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:05,812 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 0273fa7e527747c1d22b1fb928589bc9: Running coprocessor pre-open hook at 1733790425788Writing region info on filesystem at 1733790425788Initializing all the Stores at 1733790425790 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790425791 (+1 ms)Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790425791Cleaning up temporary data from old regions at 1733790425807 (+16 ms)Running coprocessor post-open hooks at 1733790425811 (+4 ms)Region opened successfully at 1733790425812 (+1 ms) 2024-12-10T00:27:05,813 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., pid=9, masterSystemTime=1733790425780 2024-12-10T00:27:05,816 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy 
task for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,816 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:05,817 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPEN, openSeqNum=9, regionLocation=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:05,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 because future has completed 2024-12-10T00:27:05,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-10T00:27:05,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 in 197 msec 2024-12-10T00:27:05,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE in 587 msec 2024-12-10T00:27:05,845 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T00:27:05,847 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T00:27:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:53462 deadline: 1733790485850, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=39473 startCode=1733790409727. As of locationSeqNum=5. 2024-12-10T00:27:05,872 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=39473 startCode=1733790409727. As of locationSeqNum=5. 2024-12-10T00:27:05,873 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=39473 startCode=1733790409727. As of locationSeqNum=5. 
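The Mutate that drew the RegionMovedException above is retried transparently once the client refreshes its cached region location. The mutation it carries is a whole-row delete, which is what produces the DeleteFamily markers for cf1 and cf2 flushed a little further down; written against the public client API it could look roughly like this sketch, under the same hypothetical connection setup as the earlier examples.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteRowSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table t = conn.getTable(
                 TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // Deleting the whole row writes one DeleteFamily marker per column family,
          // matching the r1/cf1 and r1/cf2 markers seen in the flush below.
          // If the cached region location is stale after the move, the client gets a
          // RegionMovedException, updates its cache, and retries internally.
          t.delete(new Delete(Bytes.toBytes("r1")));
        }
      }
    }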
2024-12-10T00:27:05,873 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=39473 startCode=1733790409727. As of locationSeqNum=5. 2024-12-10T00:27:05,986 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T00:27:05,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58584, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T00:27:05,997 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0273fa7e527747c1d22b1fb928589bc9 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-10T00:27:06,013 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/0557063fdff345c580f997aa58ef2a83 is 29, key is r1/cf1:/1733790425989/DeleteFamily/seqid=0 2024-12-10T00:27:06,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741888_1066 (size=4906) 2024-12-10T00:27:06,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741888_1066 (size=4906) 2024-12-10T00:27:06,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741888_1066 (size=4906) 2024-12-10T00:27:06,021 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,027 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,040 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf2/11bd5cb72b90499bbd5d08c2aa757e95 is 29, key is r1/cf2:/1733790425989/DeleteFamily/seqid=0 2024-12-10T00:27:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741889_1067 (size=4906) 2024-12-10T00:27:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741889_1067 (size=4906) 2024-12-10T00:27:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741889_1067 (size=4906) 
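The flush in progress around these records is driven by the test directly against the region. From a regular client, an equivalent flush of the whole table can be requested through the admin API instead; this is only a sketch of that alternative, not the mechanism the test itself uses.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flushes every region of the table; in this test that is the single region
          // whose cf1/cf2 memstores are being persisted in the surrounding records.
          admin.flush(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"));
        }
      }
    }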
2024-12-10T00:27:06,048 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf2/11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,054 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,055 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/0557063fdff345c580f997aa58ef2a83 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,062 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,063 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83, entries=1, sequenceid=12, filesize=4.8 K 2024-12-10T00:27:06,064 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf2/11bd5cb72b90499bbd5d08c2aa757e95 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,069 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,070 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95, entries=1, sequenceid=12, filesize=4.8 K 2024-12-10T00:27:06,071 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 0273fa7e527747c1d22b1fb928589bc9 in 74ms, sequenceid=12, compaction requested=false 2024-12-10T00:27:06,071 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0273fa7e527747c1d22b1fb928589bc9: 2024-12-10T00:27:06,073 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T00:27:06,075 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0273fa7e527747c1d22b1fb928589bc9/cf1 is initiating major compaction (all files) 2024-12-10T00:27:06,075 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T00:27:06,075 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:27:06,075 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0273fa7e527747c1d22b1fb928589bc9/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,076 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4, hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83] into tmpdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp, totalSize=9.5 K 2024-12-10T00:27:06,077 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7505668472174e6d9e3df22c4dcd63c4, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733790425222 2024-12-10T00:27:06,077 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0557063fdff345c580f997aa58ef2a83, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-10T00:27:06,088 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0273fa7e527747c1d22b1fb928589bc9#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T00:27:06,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741890_1068 (size=4626) 2024-12-10T00:27:06,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741890_1068 (size=4626) 2024-12-10T00:27:06,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741890_1068 (size=4626) 2024-12-10T00:27:06,103 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf1/3710e89542f74db3bd09e319669f1b95 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/3710e89542f74db3bd09e319669f1b95 2024-12-10T00:27:06,117 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 0273fa7e527747c1d22b1fb928589bc9/cf1 of 0273fa7e527747c1d22b1fb928589bc9 into 3710e89542f74db3bd09e319669f1b95(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
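The major compactions of cf1 (just completed above) and cf2 (started below) are likewise invoked by the test against the region directly. Requested through the public admin API instead, they would look roughly like the following sketch; note that majorCompact only queues the work, so a real caller would poll the table's compaction state before relying on the result.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queue a major compaction of each column family; the region server runs
          // the actual compaction in the background, as the records here show.
          admin.majorCompact(table, Bytes.toBytes("cf1"));
          admin.majorCompact(table, Bytes.toBytes("cf2"));
        }
      }
    }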
2024-12-10T00:27:06,117 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0273fa7e527747c1d22b1fb928589bc9: 2024-12-10T00:27:06,117 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-10T00:27:06,117 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0273fa7e527747c1d22b1fb928589bc9/cf2 is initiating major compaction (all files) 2024-12-10T00:27:06,117 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T00:27:06,117 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T00:27:06,117 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0273fa7e527747c1d22b1fb928589bc9/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,117 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95] into tmpdir=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp, totalSize=4.8 K 2024-12-10T00:27:06,118 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 11bd5cb72b90499bbd5d08c2aa757e95, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-10T00:27:06,124 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0273fa7e527747c1d22b1fb928589bc9#cf2#compaction#17 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T00:27:06,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741891_1069 (size=4592) 2024-12-10T00:27:06,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741891_1069 (size=4592) 2024-12-10T00:27:06,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741891_1069 (size=4592) 2024-12-10T00:27:06,136 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/.tmp/cf2/f01aef9c14cc4e8481c5af386dc9ae09 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/f01aef9c14cc4e8481c5af386dc9ae09 2024-12-10T00:27:06,143 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 0273fa7e527747c1d22b1fb928589bc9/cf2 of 0273fa7e527747c1d22b1fb928589bc9 into f01aef9c14cc4e8481c5af386dc9ae09(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T00:27:06,143 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0273fa7e527747c1d22b1fb928589bc9: 2024-12-10T00:27:06,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=0273fa7e527747c1d22b1fb928589bc9, source=a67c4886b4f7,39473,1733790409727, destination=a67c4886b4f7,36039,1733790409906, warming up region on a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:06,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=0273fa7e527747c1d22b1fb928589bc9, source=a67c4886b4f7,39473,1733790409727, destination=a67c4886b4f7,36039,1733790409906, running balancer 2024-12-10T00:27:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE 2024-12-10T00:27:06,150 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE 2024-12-10T00:27:06,151 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=CLOSING, regionLocation=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:06,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
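Around a move like the one starting here, the region's current hosting server can be checked from a client by looking the location up fresh in hbase:meta rather than trusting the cached entry. A sketch of that lookup with the public RegionLocator API, under the same assumptions as the earlier examples:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // 'true' forces a fresh meta lookup instead of using the cached location,
          // so the answer reflects the latest hosting server and open sequence number.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
          System.out.println(loc.getServerName() + " @ seqNum=" + loc.getSeqNum());
        }
      }
    }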
2024-12-10T00:27:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(7855): Warmup {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:06,152 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE because future has completed 2024-12-10T00:27:06,154 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:06,154 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:06,154 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T00:27:06,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727}] 2024-12-10T00:27:06,163 INFO [StoreFileOpener-0273fa7e527747c1d22b1fb928589bc9-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,163 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,169 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/3710e89542f74db3bd09e319669f1b95 2024-12-10T00:27:06,175 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4 2024-12-10T00:27:06,175 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:06,175 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,176 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:06,176 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:06,183 INFO [StoreFileOpener-0273fa7e527747c1d22b1fb928589bc9-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,183 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,189 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/f01aef9c14cc4e8481c5af386dc9ae09 2024-12-10T00:27:06,189 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1722): Closing 
0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:06,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790426190Disabling compacts and flushes for region at 1733790426190Disabling writes for close at 1733790426190Writing region close event to WAL at 1733790426191 (+1 ms)Closed at 1733790426191 2024-12-10T00:27:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-10T00:27:06,308 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,309 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-10T00:27:06,309 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:06,309 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,309 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,309 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:06,309 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:06,309 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4, hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83] to archive 2024-12-10T00:27:06,312 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T00:27:06,317 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83 to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/0557063fdff345c580f997aa58ef2a83 2024-12-10T00:27:06,317 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4 to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/7505668472174e6d9e3df22c4dcd63c4 2024-12-10T00:27:06,329 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95] to archive 2024-12-10T00:27:06,331 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
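The archiver records above and below move the compacted-away cf1 and cf2 store files under the cluster's archive directory. What ends up there can be inspected with the plain Hadoop FileSystem API; in this sketch the HDFS URI and the test-data path are copied from the log purely as illustrative values, since a real run would derive them from hbase.rootdir and the region's encoded name.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchiveSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive path for the test region, taken from the log for illustration only.
        Path archive = new Path("/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/"
            + "archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/"
            + "0273fa7e527747c1d22b1fb928589bc9");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34093"), conf)) {
          // Print the archived store files for each column family directory.
          for (FileStatus cf : fs.listStatus(archive)) {
            for (FileStatus file : fs.listStatus(cf.getPath())) {
              System.out.println(file.getPath() + " (" + file.getLen() + " bytes)");
            }
          }
        }
      }
    }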
2024-12-10T00:27:06,333 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95 to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/11bd5cb72b90499bbd5d08c2aa757e95 2024-12-10T00:27:06,339 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-10T00:27:06,340 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,340 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790426309Running coprocessor pre-close hooks at 1733790426309Disabling compacts and flushes for region at 1733790426309Disabling writes for close at 1733790426309Writing region close event to WAL at 1733790426335 (+26 ms)Running coprocessor post-close hooks at 1733790426340 (+5 ms)Closed at 1733790426340 2024-12-10T00:27:06,340 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 0273fa7e527747c1d22b1fb928589bc9 move to a67c4886b4f7,36039,1733790409906 record at close sequenceid=12 2024-12-10T00:27:06,343 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,344 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=CLOSED 2024-12-10T00:27:06,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 because future has completed 2024-12-10T00:27:06,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T00:27:06,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 in 194 msec 2024-12-10T00:27:06,352 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE; state=CLOSED, location=a67c4886b4f7,36039,1733790409906; forceNewPlan=false, retain=false 2024-12-10T00:27:06,503 INFO [a67c4886b4f7:41433 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 
1 retained the pre-restart assignment. 2024-12-10T00:27:06,503 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPENING, regionLocation=a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:06,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE because future has completed 2024-12-10T00:27:06,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906}] 2024-12-10T00:27:06,667 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,668 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:06,668 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,668 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:06,668 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,669 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,670 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,672 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:06,672 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:06,679 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/3710e89542f74db3bd09e319669f1b95 2024-12-10T00:27:06,679 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:06,680 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,681 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:06,682 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:06,690 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/f01aef9c14cc4e8481c5af386dc9ae09 2024-12-10T00:27:06,691 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:06,691 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,692 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,693 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,694 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,694 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,695 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T00:27:06,696 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,697 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0273fa7e527747c1d22b1fb928589bc9; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64846440, jitterRate=-0.03371274471282959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T00:27:06,697 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,698 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0273fa7e527747c1d22b1fb928589bc9: Running coprocessor pre-open hook at 1733790426669Writing region info on filesystem at 1733790426669Initializing all the Stores at 1733790426670 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790426670Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790426670Cleaning up temporary data from old regions at 1733790426694 (+24 ms)Running coprocessor post-open hooks at 1733790426697 (+3 ms)Region opened successfully at 1733790426698 (+1 ms) 2024-12-10T00:27:06,699 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post 
open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., pid=12, masterSystemTime=1733790426659 2024-12-10T00:27:06,702 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,702 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,702 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPEN, openSeqNum=18, regionLocation=a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:06,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 because future has completed 2024-12-10T00:27:06,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-10T00:27:06,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,36039,1733790409906 in 200 msec 2024-12-10T00:27:06,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, REOPEN/MOVE in 562 msec 2024-12-10T00:27:06,752 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T00:27:06,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53470, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T00:27:06,757 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server a67c4886b4f7,36039,1733790409906: testing ***** 2024-12-10T00:27:06,757 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-10T00:27:06,760 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-10T00:27:06,762 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-10T00:27:06,765 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-10T00:27:06,766 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-10T00:27:06,774 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 320623120 }, "NonHeapMemoryUsage": { "committed": 171180032, 
"init": 7667712, "max": -1, "used": 168533536 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "a67c4886b4f7", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2071, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 1, "ProcessCallTime_max": 10, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 3, "ProcessCallTime_median": 5, "ProcessCallTime_75th_percentile": 7, "ProcessCallTime_90th_percentile": 9, "ProcessCallTime_95th_percentile": 9, "ProcessCallTime_98th_percentile": 9, "ProcessCallTime_99th_percentile": 9, "ProcessCallTime_99.9th_percentile": 9, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 11, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 6, "TotalCallTime_75th_percentile": 8, "TotalCallTime_90th_percentile": 10, "TotalCallTime_95th_percentile": 10, "TotalCallTime_98th_percentile": 10, "TotalCallTime_99th_percentile": 10, "TotalCallTime_99.9th_percentile": 10, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, "ResponseSize_SizeRangeCount_0-10": 8, 
"exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, "RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "a67c4886b4f7", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:51780", "tag.serverName": "a67c4886b4f7,36039,1733790409906", "tag.clusterId": "d36e6add-ccf9-4630-ab24-60fe546afd9a", "tag.Context": "regionserver", "tag.Hostname": "a67c4886b4f7", "regionCount": 0, "storeCount": 0, "hlogFileCount": 1, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733790409906, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, 
"bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, 
"CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, 
"GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, 
"ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, 
"CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, 
"Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-10T00:27:06,777 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41433 {}] master.MasterRpcServices(700): a67c4886b4f7,36039,1733790409906 reported a fatal error: ***** ABORTING region server a67c4886b4f7,36039,1733790409906: testing ***** 2024-12-10T00:27:06,779 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a67c4886b4f7,36039,1733790409906' ***** 2024-12-10T00:27:06,779 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-10T00:27:06,779 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T00:27:06,779 INFO [RS:1;a67c4886b4f7:36039 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-10T00:27:06,779 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T00:27:06,780 INFO [RS:1;a67c4886b4f7:36039 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-10T00:27:06,780 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(3091): Received CLOSE for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39473 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:58584 deadline: 1733790486780, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=36039 startCode=1733790409906. As of locationSeqNum=12. 2024-12-10T00:27:06,780 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(956): aborting server a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:06,780 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T00:27:06,781 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:06,781 INFO [RS:1;a67c4886b4f7:36039 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a67c4886b4f7:36039. 2024-12-10T00:27:06,781 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:06,781 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=36039 startCode=1733790409906. As of locationSeqNum=12. 2024-12-10T00:27:06,781 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,781 DEBUG [RS:1;a67c4886b4f7:36039 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T00:27:06,781 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=36039 startCode=1733790409906. As of locationSeqNum=12. 2024-12-10T00:27:06,781 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
after waiting 0 ms 2024-12-10T00:27:06,781 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=a67c4886b4f7 port=36039 startCode=1733790409906. As of locationSeqNum=12. 2024-12-10T00:27:06,781 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,781 DEBUG [RS:1;a67c4886b4f7:36039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:06,782 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T00:27:06,782 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1325): Online Regions={0273fa7e527747c1d22b1fb928589bc9=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.} 2024-12-10T00:27:06,782 DEBUG [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1351): Waiting on 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:06,783 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:06,783 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790426780Running coprocessor pre-close hooks at 1733790426781 (+1 ms)Disabling compacts and flushes for region at 1733790426781Disabling writes for close at 1733790426781Writing region close event to WAL at 1733790426783 (+2 ms)Running coprocessor post-close hooks at 1733790426783Closed at 1733790426783 2024-12-10T00:27:06,784 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
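Because the abort closes 0273fa7e527747c1d22b1fb928589bc9 without a flush, any edits written since the last flush exist only in the WAL and must be replayed when the region reopens, which is what a test named testReplayEditsAfterRegionMovedWithMultiCF is after. The sketch below mirrors that intent rather than the test's actual code: it assumes the row key 'r1' and families cf1/cf2 from the log plus a hypothetical qualifier q, writes to both families before the abort, and later confirms both values are readable after recovery.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplayCheckSketch {
      static final byte[] ROW = Bytes.toBytes("r1");
      static final byte[] CF1 = Bytes.toBytes("cf1");
      static final byte[] CF2 = Bytes.toBytes("cf2");
      static final byte[] Q = Bytes.toBytes("q"); // hypothetical qualifier

      // Write one value per column family before the region server is aborted.
      static void writeBothFamilies(Connection conn, TableName table) throws Exception {
        try (Table t = conn.getTable(table)) {
          t.put(new Put(ROW).addColumn(CF1, Q, Bytes.toBytes("v1"))
                            .addColumn(CF2, Q, Bytes.toBytes("v2")));
        }
      }

      // After the region is reassigned: both values should be present again,
      // since they are replayed from the WAL rather than read from a flushed file.
      static boolean editsRecovered(Connection conn, TableName table) throws Exception {
        try (Table t = conn.getTable(table)) {
          Result r = t.get(new Get(ROW));
          return r.getValue(CF1, Q) != null && r.getValue(CF2, Q) != null;
        }
      }
    }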
2024-12-10T00:27:06,859 INFO [regionserver/a67c4886b4f7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:06,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server a67c4886b4f7,36039,1733790409906 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:06,888 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server a67c4886b4f7,36039,1733790409906 aborting 2024-12-10T00:27:06,888 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server a67c4886b4f7,36039,1733790409906 aborting 2024-12-10T00:27:06,888 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=12 from cache 2024-12-10T00:27:06,982 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(976): stopping server a67c4886b4f7,36039,1733790409906; all regions closed. 2024-12-10T00:27:06,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741833_1009 (size=1407) 2024-12-10T00:27:06,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741833_1009 (size=1407) 2024-12-10T00:27:06,991 DEBUG [RS:1;a67c4886b4f7:36039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:06,991 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:06,991 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T00:27:06,992 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.ChoreService(370): Chore service for: regionserver/a67c4886b4f7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T00:27:06,992 INFO [regionserver/a67c4886b4f7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T00:27:06,992 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T00:27:06,992 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
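While port 36039 refuses connections, the async client keeps retrying against refreshed locations, as the AsyncRegionLocatorHelper entries show. Below is a sketch of the stock client retry knobs a caller might tune to ride out that window; the property names are standard HBase client keys, and the values chosen here are arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientRetrySketch {
      static Connection connectWithPatientRetries() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 15);         // retries per operation
        conf.setLong("hbase.client.pause", 100);                 // base backoff between retries (ms)
        conf.setLong("hbase.client.operation.timeout", 60_000);  // overall per-operation budget (ms)
        return ConnectionFactory.createConnection(conf);
      }
    }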
2024-12-10T00:27:06,992 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T00:27:06,993 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T00:27:06,993 INFO [RS:1;a67c4886b4f7:36039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36039 2024-12-10T00:27:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T00:27:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a67c4886b4f7,36039,1733790409906 2024-12-10T00:27:07,084 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T00:27:07,086 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a67c4886b4f7,36039,1733790409906] 2024-12-10T00:27:07,102 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18] 2024-12-10T00:27:07,104 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server a67c4886b4f7:36039 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: a67c4886b4f7/172.17.0.2:36039 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
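The ZKWatcher entries above show how the master learns about the dead server without any direct notification: the region server's ephemeral znode under /hbase/rs disappears when its ZooKeeper session closes, the master's watcher receives NodeChildrenChanged/NodeDeleted, and RegionServerTracker begins expiration processing. A stripped-down sketch of that watch pattern using the plain ZooKeeper client; the quorum address and paths come from this run, and this is not HBase's actual RegionServerTracker:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsWatcherSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; the session timeout is arbitrary here.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51780", 30_000, event -> { });
        Watcher rsWatcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeChildrenChanged
                && "/hbase/rs".equals(event.getPath())) {
              try {
                // Re-list the live servers and re-arm the watch; any server missing
                // from the previous snapshot would be handed to expiration handling.
                List<String> live = zk.getChildren("/hbase/rs", this);
                System.out.println("live regionservers: " + live);
              } catch (Exception e) {
                e.printStackTrace();
              }
            }
          }
        };
        zk.getChildren("/hbase/rs", rsWatcher); // initial snapshot + watch
        Thread.sleep(Long.MAX_VALUE);
      }
    }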
2024-12-10T00:27:07,105 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18, error=java.net.ConnectException: Call to address=a67c4886b4f7:36039 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: a67c4886b4f7/172.17.0.2:36039 2024-12-10T00:27:07,105 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 is java.net.ConnectException: Connection refused 2024-12-10T00:27:07,105 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 from cache 2024-12-10T00:27:07,106 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address a67c4886b4f7:36039 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: a67c4886b4f7/172.17.0.2:36039 2024-12-10T00:27:07,136 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a67c4886b4f7,36039,1733790409906 already deleted, retry=false 2024-12-10T00:27:07,137 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of a67c4886b4f7,36039,1733790409906 on a67c4886b4f7,41433,1733790408551 2024-12-10T00:27:07,142 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure a67c4886b4f7,36039,1733790409906, splitWal=true, meta=false 2024-12-10T00:27:07,145 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for a67c4886b4f7,36039,1733790409906 (carryingMeta=false) a67c4886b4f7,36039,1733790409906/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@1baf3822[Write locks = 1, Read locks = 0], oldState=ONLINE. 
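The pid/ppid chain in the entries that follow (13 spawning 14 and 15 for WAL splitting, then 16 and 17 for reassignment) is the master's procedure framework decomposing server-crash recovery into child steps, with each parent suspended until its children report success. A deliberately simplified, hypothetical sketch of that parent/child pattern; this is not the real ProcedureV2 API, only an illustration of the structure visible in the log:

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class MiniProcedureSketch {
      interface Step { void run(ExecutorService pool) throws Exception; }

      static Step leaf(String name) {
        return pool -> System.out.println("finished " + name);
      }

      static Step parent(String name, List<Step> children) {
        return pool -> {
          for (Step child : children) {
            // Each child runs to completion before the next one starts and before
            // the parent itself can finish (mirrors pid=13 waiting on 14/15, then 16/17).
            pool.submit(() -> { child.run(pool); return null; }).get();
          }
          System.out.println("finished " + name);
        };
      }

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Step splitWal = parent("SplitWALProcedure",
            List.of(leaf("SplitWALRemoteProcedure on a67c4886b4f7,39473")));
        Step assign = parent("TransitRegionStateProcedure ASSIGN",
            List.of(leaf("OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9")));
        Step serverCrash = parent("ServerCrashProcedure a67c4886b4f7,36039",
            List.of(splitWal, assign));
        serverCrash.run(pool);
        pool.shutdown();
      }
    }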
2024-12-10T00:27:07,146 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure a67c4886b4f7,36039,1733790409906, splitWal=true, meta=false 2024-12-10T00:27:07,147 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(207): a67c4886b4f7,36039,1733790409906 had 1 regions 2024-12-10T00:27:07,148 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure a67c4886b4f7,36039,1733790409906, splitWal=true, meta=false, isMeta: false 2024-12-10T00:27:07,149 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting 2024-12-10T00:27:07,151 INFO [PEWorker-1 {}] master.SplitWALManager(105): a67c4886b4f7,36039,1733790409906 WAL count=1, meta=false 2024-12-10T00:27:07,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure a67c4886b4f7%2C36039%2C1733790409906.1733790411694}] 2024-12-10T00:27:07,158 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:07,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure a67c4886b4f7%2C36039%2C1733790409906.1733790411694, worker=a67c4886b4f7,39473,1733790409727}] 2024-12-10T00:27:07,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T00:27:07,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x1000d1e23400002, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T00:27:07,229 INFO [RS:1;a67c4886b4f7:36039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T00:27:07,230 INFO [RS:1;a67c4886b4f7:36039 {}] regionserver.HRegionServer(1031): Exiting; stopping=a67c4886b4f7,36039,1733790409906; zookeeper connection closed. 
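The WAL-splitting entries that follow show why recovery is cheap here: the splitter reads 6 edits from the dead server's WAL but writes recovered-edits output for 0 regions, because every edit's sequence id is at or below the region's last flushed sequence id (12 for both cf1 and cf2), so nothing needs replaying and the file can move straight to oldWALs. A sketch of that skip rule with hypothetical types, not the real WALSplitter internals; the example edits are made up, but the flushed sequence ids are the ones logged below:

    import java.util.List;
    import java.util.Map;

    public class WalSplitSkipSketch {
      // An edit only needs to be re-written as a recovered edit if its sequence id
      // is newer than what the region already flushed to HFiles.
      record Edit(String region, String family, long seqId) { }

      static boolean needsReplay(Edit e, Map<String, Long> lastFlushedByFamily) {
        long flushed = lastFlushedByFamily.getOrDefault(e.family(), -1L);
        return e.seqId() > flushed;
      }

      public static void main(String[] args) {
        // Both families flushed up to sequence id 12, per the log.
        Map<String, Long> flushed = Map.of("cf1", 12L, "cf2", 12L);
        List<Edit> walEdits = List.of(
            new Edit("0273fa7e527747c1d22b1fb928589bc9", "cf1", 7),
            new Edit("0273fa7e527747c1d22b1fb928589bc9", "cf2", 9),
            new Edit("0273fa7e527747c1d22b1fb928589bc9", "cf1", 12));
        long toReplay = walEdits.stream().filter(e -> needsReplay(e, flushed)).count();
        System.out.println("edits needing replay: " + toReplay); // 0, all skipped
      }
    }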
2024-12-10T00:27:07,230 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2616d68c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2616d68c 2024-12-10T00:27:07,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39473 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-10T00:27:07,336 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, size=1.4 K (1407bytes) 2024-12-10T00:27:07,336 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 2024-12-10T00:27:07,337 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 after 1ms 2024-12-10T00:27:07,340 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:07,340 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 took 4ms 2024-12-10T00:27:07,347 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 0273fa7e527747c1d22b1fb928589bc9: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-10T00:27:07,347 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 so closing down 2024-12-10T00:27:07,347 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:07,348 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:07,348 INFO [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions 
in 7 ms; skipped=6; WAL=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, size=1.4 K, length=1407, corrupted=false, cancelled=false 2024-12-10T00:27:07,348 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, journal: Splitting hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, size=1.4 K (1407bytes) at 1733790427336Finishing writing output for hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 so closing down at 1733790427347 (+11 ms)3 split writer threads finished at 1733790427348 (+1 ms)Processed 6 edits across 0 Regions in 7 ms; skipped=6; WAL=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694, size=1.4 K, length=1407, corrupted=false, cancelled=false at 1733790427348 2024-12-10T00:27:07,348 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 2024-12-10T00:27:07,349 DEBUG [RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-10T00:27:07,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41433 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-10T00:27:07,355 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting/a67c4886b4f7%2C36039%2C1733790409906.1733790411694 to hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs 2024-12-10T00:27:07,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-10T00:27:07,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure a67c4886b4f7%2C36039%2C1733790409906.1733790411694, worker=a67c4886b4f7,39473,1733790409727 in 197 msec 2024-12-10T00:27:07,360 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:07,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-10T00:27:07,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure a67c4886b4f7%2C36039%2C1733790409906.1733790411694, worker=a67c4886b4f7,39473,1733790409727 in 209 msec 2024-12-10T00:27:07,365 INFO [PEWorker-1 {}] master.SplitLogManager(171): 
hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting dir is empty, no logs to split. 2024-12-10T00:27:07,365 INFO [PEWorker-1 {}] master.SplitWALManager(105): a67c4886b4f7,36039,1733790409906 WAL count=0, meta=false 2024-12-10T00:27:07,365 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if a67c4886b4f7,36039,1733790409906 WAL splitting is done? wals=0, meta=false 2024-12-10T00:27:07,368 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for a67c4886b4f7,36039,1733790409906 failed, ignore...File hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/WALs/a67c4886b4f7,36039,1733790409906-splitting does not exist. 2024-12-10T00:27:07,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN}] 2024-12-10T00:27:07,373 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN 2024-12-10T00:27:07,374 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-10T00:27:07,420 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18] 2024-12-10T00:27:07,420 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to a67c4886b4f7:36039 this server is in the failed servers list 2024-12-10T00:27:07,421 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=a67c4886b4f7:36039 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: a67c4886b4f7:36039 2024-12-10T00:27:07,421 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: a67c4886b4f7:36039 2024-12-10T00:27:07,421 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,36039,1733790409906, seqNum=18 from cache 2024-12-10T00:27:07,525 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(204): Hosts are {a67c4886b4f7=0} racks are {/default-rack=0} 2024-12-10T00:27:07,526 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T00:27:07,526 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T00:27:07,526 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T00:27:07,526 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T00:27:07,526 INFO [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T00:27:07,526 INFO [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T00:27:07,526 DEBUG [a67c4886b4f7:41433 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T00:27:07,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPENING, regionLocation=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:07,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN because future has completed 2024-12-10T00:27:07,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727}] 2024-12-10T00:27:07,685 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:07,686 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 0273fa7e527747c1d22b1fb928589bc9, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:07,686 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,686 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:07,686 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,686 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,688 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,688 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf1 2024-12-10T00:27:07,689 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:07,695 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf1/3710e89542f74db3bd09e319669f1b95 2024-12-10T00:27:07,696 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:07,696 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,697 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0273fa7e527747c1d22b1fb928589bc9 columnFamilyName cf2 2024-12-10T00:27:07,697 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:07,704 DEBUG [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/cf2/f01aef9c14cc4e8481c5af386dc9ae09 2024-12-10T00:27:07,704 INFO [StoreOpener-0273fa7e527747c1d22b1fb928589bc9-1 {}] regionserver.HStore(327): Store=0273fa7e527747c1d22b1fb928589bc9/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:07,704 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,705 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,706 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,707 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,707 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,708 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
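The "(64.0 M)" above is simply the region memstore flush size divided by the number of column families: with the default 128 MB hbase.hregion.memstore.flush.size and this region's two families (cf1 and cf2), the per-family lower bound comes out to 64 MiB, which matches the flushSizeLowerBound=67108864 reported when the region finishes opening below. The arithmetic, spelled out:

    public class FlushLowerBound {
      public static void main(String[] args) {
        // Per-family flush lower bound used when the table does not set
        // hbase.hregion.percolumnfamilyflush.size.lower.bound explicitly.
        long memstoreFlushSize = 128L * 1024 * 1024; // default hbase.hregion.memstore.flush.size
        int families = 2;                            // cf1 and cf2
        long lowerBound = memstoreFlushSize / families;
        System.out.println(lowerBound);              // 67108864 (= 64 MiB), as logged
      }
    }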
2024-12-10T00:27:07,709 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,710 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 0273fa7e527747c1d22b1fb928589bc9; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73816010, jitterRate=0.09994426369667053}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T00:27:07,710 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:07,710 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 0273fa7e527747c1d22b1fb928589bc9: Running coprocessor pre-open hook at 1733790427686Writing region info on filesystem at 1733790427686Initializing all the Stores at 1733790427687 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790427687Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790427687Cleaning up temporary data from old regions at 1733790427707 (+20 ms)Running coprocessor post-open hooks at 1733790427710 (+3 ms)Region opened successfully at 1733790427710 2024-12-10T00:27:07,711 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., pid=17, masterSystemTime=1733790427682 2024-12-10T00:27:07,714 DEBUG [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:07,714 INFO [RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
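In the open journal above, the region comes back with next sequenceid=18, matching the seqNum=18 the client had cached for the old location, and the split-policy line shows how the jittered split threshold is derived: desiredMaxFileSize is the base maximum file size plus base times jitterRate. The base of 67108864 bytes (64 MiB) is inferred from the two logged values rather than stated directly in the log; a quick check of the numbers:

    public class SplitJitterCheck {
      public static void main(String[] args) {
        // Reproducing the desiredMaxFileSize reported by ConstantSizeRegionSplitPolicy
        // from the jitterRate in the same log line.
        long baseMaxFileSize = 67_108_864L;          // 64 MiB, inferred from the logged result
        double jitterRate = 0.09994426369667053;     // value from the log
        long desired = baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
        System.out.println(desired);                 // 73816010, as logged
      }
    }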
2024-12-10T00:27:07,715 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=0273fa7e527747c1d22b1fb928589bc9, regionState=OPEN, openSeqNum=18, regionLocation=a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:07,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 because future has completed 2024-12-10T00:27:07,721 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-10T00:27:07,721 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 0273fa7e527747c1d22b1fb928589bc9, server=a67c4886b4f7,39473,1733790409727 in 189 msec 2024-12-10T00:27:07,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-10T00:27:07,724 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server a67c4886b4f7,36039,1733790409906 after splitting done 2024-12-10T00:27:07,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0273fa7e527747c1d22b1fb928589bc9, ASSIGN in 350 msec 2024-12-10T00:27:07,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure a67c4886b4f7,36039,1733790409906, splitWal=true, meta=false in 586 msec 2024-12-10T00:27:07,929 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9., hostname=a67c4886b4f7,39473,1733790409727, seqNum=18] 2024-12-10T00:27:07,942 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=403 (was 400) Potentially hanging thread: RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/a67c4886b4f7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1682919945_22 at /127.0.0.1:42530 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_767011405_22 at /127.0.0.1:36402 [Waiting for operation #24] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_LOG_REPLAY_OPS-regionserver/a67c4886b4f7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1682919945_22 at /127.0.0.1:45722 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1024 (was 995) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=292 (was 318), ProcessCount=11 (was 11), AvailableMemoryMB=7396 (was 7458) 2024-12-10T00:27:07,952 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=403, OpenFileDescriptor=1024, MaxFileDescriptor=1048576, SystemLoadAverage=292, ProcessCount=11, AvailableMemoryMB=7396 2024-12-10T00:27:07,965 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:07,966 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:07,967 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:07,969 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-66243243, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-66243243, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:07,980 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-66243243/hregion-66243243.1733790427970, exclude list is [], retry=0 2024-12-10T00:27:07,983 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:07,983 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:07,983 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:07,985 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-66243243/hregion-66243243.1733790427970 2024-12-10T00:27:07,985 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:07,986 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 906ffb8261ae6ee8a65a5082ad70d3e6, NAME => 'testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 
'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:07,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741893_1071 (size=67) 2024-12-10T00:27:07,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741893_1071 (size=67) 2024-12-10T00:27:07,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741893_1071 (size=67) 2024-12-10T00:27:07,994 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:07,996 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:07,997 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName a 2024-12-10T00:27:07,997 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:07,998 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:07,998 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:07,999 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName b 2024-12-10T00:27:08,000 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,000 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,000 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,002 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName c 2024-12-10T00:27:08,002 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,002 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,003 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,003 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,003 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,004 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,004 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 906ffb8261ae6ee8a65a5082ad70d3e6 
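The HRegion(7572) entry above prints the table descriptor this test region is created with: table testReplayEditsWrittenViaHRegion with three column families 'a', 'b' and 'c', each keeping a single version and otherwise using default settings, which is what the StoreOpener entries then instantiate one family at a time. As a minimal sketch (not the test's own code) of how such a descriptor is assembled with the public hbase-client builders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Families 'a', 'b', 'c' with VERSIONS => '1' and everything else left at its
    // default, matching the descriptor printed in the "creating {ENCODED => 906ffb..." entry.
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
    for (String family : new String[] { "a", "b", "c" }) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .build());
    }
    TableDescriptor descriptor = table.build();
    System.out.println(descriptor); // prints a one-line form similar to the logged descriptor
  }
}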
2024-12-10T00:27:08,005 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:08,006 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,008 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:08,008 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 906ffb8261ae6ee8a65a5082ad70d3e6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64433348, jitterRate=-0.039868295192718506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:08,009 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 906ffb8261ae6ee8a65a5082ad70d3e6: Writing region info on filesystem at 1733790427994Initializing all the Stores at 1733790427995 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790427995Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790427995Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790427995Cleaning up temporary data from old regions at 1733790428004 (+9 ms)Region opened successfully at 1733790428009 (+5 ms) 2024-12-10T00:27:08,009 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 906ffb8261ae6ee8a65a5082ad70d3e6, disabling compactions & flushes 2024-12-10T00:27:08,009 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,009 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,009 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. after waiting 0 ms 2024-12-10T00:27:08,009 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 
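Two numbers in the open journal above can be reproduced directly. The FlushLargeStoresPolicy line falls back to the memstore flush size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset; with the default 128 MB flush size and three families that gives exactly the logged flushSizeLowerBound=44739242 ("42.7 M"). The ConstantSizeRegionSplitPolicy figure is consistent with a base maximum file size with the printed jitterRate applied; the 64 MB base used below is an assumption about this test's configuration, not something stated in the log.

public class OpenJournalArithmetic {
  public static void main(String[] args) {
    // flushSizeLowerBound = memstore flush size / number of column families
    long memstoreFlushSize = 134_217_728L; // 128 MB, the HBase default for hbase.hregion.memstore.flush.size
    int families = 3;                      // 'a', 'b' and 'c'
    System.out.println(memstoreFlushSize / families); // 44739242, i.e. the "42.7 M" in the log

    // desiredMaxFileSize = base max file size * (1 + jitterRate)
    long baseMaxFileSize = 67_108_864L;    // 64 MB (assumed test-level setting; the base is not printed in the log)
    double jitterRate = -0.039868295192718506;
    // ~64433348, the logged desiredMaxFileSize (give or take 1 from floating-point rounding)
    System.out.println(Math.round(baseMaxFileSize * (1 + jitterRate)));
  }
}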
2024-12-10T00:27:08,010 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,010 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 906ffb8261ae6ee8a65a5082ad70d3e6: Waiting for close lock at 1733790428009Disabling compacts and flushes for region at 1733790428009Disabling writes for close at 1733790428009Writing region close event to WAL at 1733790428010 (+1 ms)Closed at 1733790428010 2024-12-10T00:27:08,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741892_1070 (size=95) 2024-12-10T00:27:08,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741892_1070 (size=95) 2024-12-10T00:27:08,017 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-66243243/hregion-66243243.1733790427970 not finished, retry = 0 2024-12-10T00:27:08,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741892_1070 (size=95) 2024-12-10T00:27:08,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:08,120 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-66243243:(num 1733790427970) 2024-12-10T00:27:08,120 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:08,123 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:08,142 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, exclude list is [], retry=0 2024-12-10T00:27:08,145 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:08,146 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:08,146 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:08,153 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 2024-12-10T00:27:08,153 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:08,153 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 906ffb8261ae6ee8a65a5082ad70d3e6, NAME => 'testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:08,153 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:08,154 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,154 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,156 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,157 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName a 2024-12-10T00:27:08,157 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,157 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,158 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,158 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName b 2024-12-10T00:27:08,158 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,159 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,159 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,160 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName c 2024-12-10T00:27:08,160 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,160 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,160 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,161 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,163 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,164 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,164 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,165 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
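The entries that follow (the "Flushing 3/3 column families, dataSize=2.55 KB" line and the three per-family flush files with entries=10 each) are the result of the test writing ten edits to each of the families 'a', 'b' and 'c' through the freshly reopened region. A sketch of what such writes look like with the client Put API is below; the row value and the x0..x9 qualifier pattern are inferred from the a:x0 / b:x0 / c:x0 keys in the flush entries and the per-family entry counts, not copied from the test source, and the puts would be applied with the region opened above rather than printed.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion"); // row name taken from the logged cell keys
    List<Put> puts = new ArrayList<>();
    for (String family : new String[] { "a", "b", "c" }) {
      for (int i = 0; i < 10; i++) { // 10 edits per family => 30 WAL edits, 870 B per flushed family in the log
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("x" + i), Bytes.toBytes("value" + i));
        puts.add(put);
      }
    }
    System.out.println(puts.size() + " puts prepared"); // the test itself would hand these to the region
  }
}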
2024-12-10T00:27:08,166 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,167 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 906ffb8261ae6ee8a65a5082ad70d3e6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60938589, jitterRate=-0.09194426238536835}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:08,168 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 906ffb8261ae6ee8a65a5082ad70d3e6: Writing region info on filesystem at 1733790428154Initializing all the Stores at 1733790428155 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428155Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428156 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428156Cleaning up temporary data from old regions at 1733790428164 (+8 ms)Region opened successfully at 1733790428168 (+4 ms) 2024-12-10T00:27:08,195 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 906ffb8261ae6ee8a65a5082ad70d3e6 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-10T00:27:08,211 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/a/26740a862e364016b67b76b16389299c is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733790428168/Put/seqid=0 2024-12-10T00:27:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741895_1073 (size=5958) 2024-12-10T00:27:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741895_1073 (size=5958) 2024-12-10T00:27:08,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741895_1073 (size=5958) 2024-12-10T00:27:08,227 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/a/26740a862e364016b67b76b16389299c 2024-12-10T00:27:08,256 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/53decdf8029b4386af7eb4ca0f7dda97 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733790428178/Put/seqid=0 2024-12-10T00:27:08,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741896_1074 (size=5958) 2024-12-10T00:27:08,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741896_1074 (size=5958) 2024-12-10T00:27:08,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741896_1074 (size=5958) 2024-12-10T00:27:08,281 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/53decdf8029b4386af7eb4ca0f7dda97 2024-12-10T00:27:08,316 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/c/28e6a997d53b423eaeabb6eac48621eb is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733790428185/Put/seqid=0 2024-12-10T00:27:08,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741897_1075 (size=5958) 2024-12-10T00:27:08,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741897_1075 (size=5958) 2024-12-10T00:27:08,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741897_1075 (size=5958) 2024-12-10T00:27:08,357 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/c/28e6a997d53b423eaeabb6eac48621eb 2024-12-10T00:27:08,370 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/a/26740a862e364016b67b76b16389299c as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/a/26740a862e364016b67b76b16389299c 2024-12-10T00:27:08,381 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/a/26740a862e364016b67b76b16389299c, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T00:27:08,383 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/53decdf8029b4386af7eb4ca0f7dda97 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/53decdf8029b4386af7eb4ca0f7dda97 2024-12-10T00:27:08,391 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/53decdf8029b4386af7eb4ca0f7dda97, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T00:27:08,393 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/c/28e6a997d53b423eaeabb6eac48621eb as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/c/28e6a997d53b423eaeabb6eac48621eb 2024-12-10T00:27:08,400 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/c/28e6a997d53b423eaeabb6eac48621eb, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T00:27:08,402 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 906ffb8261ae6ee8a65a5082ad70d3e6 in 207ms, sequenceid=33, compaction requested=false 2024-12-10T00:27:08,402 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 906ffb8261ae6ee8a65a5082ad70d3e6: 2024-12-10T00:27:08,402 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 906ffb8261ae6ee8a65a5082ad70d3e6, disabling compactions & flushes 2024-12-10T00:27:08,402 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,402 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,402 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. after waiting 0 ms 2024-12-10T00:27:08,403 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 2024-12-10T00:27:08,404 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 
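In the entries that follow, the region is closed, family 'b''s freshly flushed store file is archived away by HFileArchiver, and the WAL is split; that removal is how the test ends up with the "partial flush" its name refers to, since 'a' and 'c' keep their data on disk while 'b' must be rebuilt from the WAL. The reader initialization line notes hasValueCompression=true, valueCompressionType=GZ, which is the point of TestAsyncWALReplayValueCompression: cell values inside the WAL are stored GZ-compressed. Purely as a conceptual illustration of that idea, and not HBase's actual codec path, a plain java.util.zip round trip of a value looks like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class ValueCompressionSketch {
  public static void main(String[] args) throws IOException {
    byte[] value = "some WAL cell value".getBytes(StandardCharsets.UTF_8);

    // Compress the value before it is written into the WAL entry (conceptually).
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(compressed)) {
      gz.write(value);
    }

    // Decompress it again when the entry is read back during WAL splitting/replay.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (GZIPInputStream gz = new GZIPInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      gz.transferTo(restored);
    }

    System.out.println(new String(restored.toByteArray(), StandardCharsets.UTF_8));
  }
}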
2024-12-10T00:27:08,404 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 906ffb8261ae6ee8a65a5082ad70d3e6: Waiting for close lock at 1733790428402Disabling compacts and flushes for region at 1733790428402Disabling writes for close at 1733790428403 (+1 ms)Writing region close event to WAL at 1733790428404 (+1 ms)Closed at 1733790428404 2024-12-10T00:27:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741894_1072 (size=3384) 2024-12-10T00:27:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741894_1072 (size=3384) 2024-12-10T00:27:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741894_1072 (size=3384) 2024-12-10T00:27:08,417 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/53decdf8029b4386af7eb4ca0f7dda97 to hdfs://localhost:34093/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/53decdf8029b4386af7eb4ca0f7dda97 2024-12-10T00:27:08,439 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, size=3.3 K (3384bytes) 2024-12-10T00:27:08,439 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 2024-12-10T00:27:08,439 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 after 0ms 2024-12-10T00:27:08,442 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:08,442 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 took 3ms 2024-12-10T00:27:08,445 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 so closing down 2024-12-10T00:27:08,445 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:08,446 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733790428123.temp 2024-12-10T00:27:08,447 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp 2024-12-10T00:27:08,447 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:08,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34693 is added to blk_1073741898_1076 (size=2944) 2024-12-10T00:27:08,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741898_1076 (size=2944) 2024-12-10T00:27:08,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741898_1076 (size=2944) 2024-12-10T00:27:08,454 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:08,456 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 2024-12-10T00:27:08,456 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, size=3.3 K, length=3384, corrupted=false, cancelled=false 2024-12-10T00:27:08,456 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, journal: Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, size=3.3 K (3384bytes) at 1733790428439Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 so closing down at 1733790428445 (+6 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp at 1733790428447 (+2 ms)3 split writer threads finished at 1733790428447Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733790428454 (+7 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000003-wal.1733790428123.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 at 1733790428456 (+2 ms)Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123, size=3.3 K, length=3384, corrupted=false, cancelled=false at 1733790428456 2024-12-10T00:27:08,458 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428123 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790428123 2024-12-10T00:27:08,460 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 2024-12-10T00:27:08,460 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:08,462 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:08,474 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428463, exclude list is [], retry=0 2024-12-10T00:27:08,477 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:08,478 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:08,478 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:08,483 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428463 2024-12-10T00:27:08,483 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:08,483 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 906ffb8261ae6ee8a65a5082ad70d3e6, NAME => 'testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:08,483 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:08,483 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,483 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,485 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,485 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName a 2024-12-10T00:27:08,485 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,490 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/a/26740a862e364016b67b76b16389299c 2024-12-10T00:27:08,490 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,491 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,492 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName b 2024-12-10T00:27:08,492 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,493 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,493 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,494 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906ffb8261ae6ee8a65a5082ad70d3e6 columnFamilyName c 2024-12-10T00:27:08,494 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,500 DEBUG [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/c/28e6a997d53b423eaeabb6eac48621eb 2024-12-10T00:27:08,501 INFO [StoreOpener-906ffb8261ae6ee8a65a5082ad70d3e6-1 {}] regionserver.HStore(327): Store=906ffb8261ae6ee8a65a5082ad70d3e6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,501 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,502 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,504 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,504 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 2024-12-10T00:27:08,507 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:08,509 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 2024-12-10T00:27:08,509 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 906ffb8261ae6ee8a65a5082ad70d3e6 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-10T00:27:08,526 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/e0196e7a52164d76b5263d8058f3fc04 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733790428178/Put/seqid=0 2024-12-10T00:27:08,536 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741900_1078 (size=5958) 2024-12-10T00:27:08,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741900_1078 (size=5958) 2024-12-10T00:27:08,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741900_1078 (size=5958) 2024-12-10T00:27:08,537 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/e0196e7a52164d76b5263d8058f3fc04 2024-12-10T00:27:08,544 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/.tmp/b/e0196e7a52164d76b5263d8058f3fc04 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/e0196e7a52164d76b5263d8058f3fc04 2024-12-10T00:27:08,550 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/b/e0196e7a52164d76b5263d8058f3fc04, entries=10, sequenceid=32, filesize=5.8 K 2024-12-10T00:27:08,550 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 906ffb8261ae6ee8a65a5082ad70d3e6 in 41ms, sequenceid=32, compaction requested=false; wal=null 2024-12-10T00:27:08,551 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/0000000000000000032 2024-12-10T00:27:08,552 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,553 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,553 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
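The "Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32" line a little earlier is the heart of the replay: each recovered edit is re-applied only to stores whose on-disk data does not already cover its sequence id. Families 'a' and 'c' reopened with store files flushed at sequence id 33, so all 20 of their recovered edits (ids 3..32) are skipped; family 'b', whose file was archived, has nothing on disk, so its 10 edits are applied and then flushed as the 870 B file shown just above. A minimal stand-alone sketch of that per-family filter, using the sequence ids from this log and no HBase types:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ReplayFilterSketch {
  record Edit(String family, long seqId) {}

  public static void main(String[] args) {
    // Highest sequence id already persisted per family when the region reopened:
    // 'a' and 'c' still have their files flushed at sequenceid=33, 'b' has none.
    Map<String, Long> maxFlushedSeqId = Map.of("a", 33L, "b", -1L, "c", 33L);

    // 30 recovered edits with sequence ids 3..32, ten per family, as reported by the splitter
    // ("wrote 30 edits"); the exact interleaving assumed here does not affect the counts.
    List<Edit> recovered = new ArrayList<>();
    long seq = 3;
    for (int i = 0; i < 10; i++) {
      for (String family : new String[] { "a", "b", "c" }) {
        recovered.add(new Edit(family, seq++));
      }
    }

    int applied = 0, skipped = 0;
    for (Edit edit : recovered) {
      // Re-apply only if the store has not already persisted data covering this sequence id.
      if (edit.seqId() > maxFlushedSeqId.get(edit.family())) {
        applied++;
      } else {
        skipped++;
      }
    }
    System.out.println("Applied " + applied + ", skipped " + skipped); // Applied 10, skipped 20, as in the log
  }
}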
2024-12-10T00:27:08,555 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 906ffb8261ae6ee8a65a5082ad70d3e6 2024-12-10T00:27:08,558 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/906ffb8261ae6ee8a65a5082ad70d3e6/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-10T00:27:08,559 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 906ffb8261ae6ee8a65a5082ad70d3e6; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71991181, jitterRate=0.07275219261646271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:08,560 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 906ffb8261ae6ee8a65a5082ad70d3e6: Writing region info on filesystem at 1733790428483Initializing all the Stores at 1733790428484 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428484Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428484Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428484Obtaining lock to block concurrent updates at 1733790428509 (+25 ms)Preparing flush snapshotting stores in 906ffb8261ae6ee8a65a5082ad70d3e6 at 1733790428509Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733790428509Flushing stores of testReplayEditsWrittenViaHRegion,,1733790427965.906ffb8261ae6ee8a65a5082ad70d3e6. 
at 1733790428509Flushing 906ffb8261ae6ee8a65a5082ad70d3e6/b: creating writer at 1733790428509Flushing 906ffb8261ae6ee8a65a5082ad70d3e6/b: appending metadata at 1733790428525 (+16 ms)Flushing 906ffb8261ae6ee8a65a5082ad70d3e6/b: closing flushed file at 1733790428525Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50d3e97: reopening flushed file at 1733790428543 (+18 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 906ffb8261ae6ee8a65a5082ad70d3e6 in 41ms, sequenceid=32, compaction requested=false; wal=null at 1733790428550 (+7 ms)Cleaning up temporary data from old regions at 1733790428553 (+3 ms)Region opened successfully at 1733790428560 (+7 ms) 2024-12-10T00:27:08,584 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=413 (was 403) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:36546 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:42674 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:42530 [Waiting for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:36402 [Waiting for operation #41] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:45722 [Waiting for operation #32] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:45878 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1100 (was 1024) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=292 (was 292), ProcessCount=11 (was 11), AvailableMemoryMB=7375 (was 7396) 2024-12-10T00:27:08,587 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1100 is superior to 1024 2024-12-10T00:27:08,598 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=413, OpenFileDescriptor=1100, MaxFileDescriptor=1048576, SystemLoadAverage=292, ProcessCount=11, AvailableMemoryMB=7375 2024-12-10T00:27:08,598 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1100 is superior to 1024 2024-12-10T00:27:08,613 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:08,615 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:08,615 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:08,618 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-38678942, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-38678942, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:08,630 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-38678942/hregion-38678942.1733790428618, exclude list is [], retry=0 2024-12-10T00:27:08,633 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:08,633 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:08,633 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:08,636 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-38678942/hregion-38678942.1733790428618 2024-12-10T00:27:08,636 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:27:08,636 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 99718257719e4d368331920ea3932dde, NAME => 'testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:08,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741902_1080 (size=68) 2024-12-10T00:27:08,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741902_1080 (size=68) 2024-12-10T00:27:08,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741902_1080 (size=68) 2024-12-10T00:27:08,650 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:08,651 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,652 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName a 2024-12-10T00:27:08,653 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,653 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,653 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,655 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName b 2024-12-10T00:27:08,655 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,655 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,655 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,657 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName c 2024-12-10T00:27:08,657 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,658 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,658 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,659 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,659 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,660 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,661 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,661 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:08,663 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,666 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:08,667 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 99718257719e4d368331920ea3932dde; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66893971, jitterRate=-0.0032021552324295044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:08,667 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 99718257719e4d368331920ea3932dde: Writing region info on filesystem at 1733790428650Initializing all the Stores at 1733790428650Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428651 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428651Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428651Cleaning up temporary data from old regions at 1733790428661 (+10 ms)Region opened successfully at 1733790428667 (+6 ms) 2024-12-10T00:27:08,668 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 99718257719e4d368331920ea3932dde, disabling compactions & flushes 2024-12-10T00:27:08,668 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:08,668 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:08,668 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 
after waiting 0 ms 2024-12-10T00:27:08,668 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:08,668 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:08,668 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 99718257719e4d368331920ea3932dde: Waiting for close lock at 1733790428667Disabling compacts and flushes for region at 1733790428668 (+1 ms)Disabling writes for close at 1733790428668Writing region close event to WAL at 1733790428668Closed at 1733790428668 2024-12-10T00:27:08,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741901_1079 (size=95) 2024-12-10T00:27:08,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741901_1079 (size=95) 2024-12-10T00:27:08,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741901_1079 (size=95) 2024-12-10T00:27:08,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:08,678 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-38678942:(num 1733790428618) 2024-12-10T00:27:08,679 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:08,682 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:08,697 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, exclude list is [], retry=0 2024-12-10T00:27:08,700 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:08,700 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:08,700 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:08,707 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 2024-12-10T00:27:08,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:08,784 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 99718257719e4d368331920ea3932dde, NAME => 'testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:08,786 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,786 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:08,786 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,786 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,788 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,789 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName a 2024-12-10T00:27:08,790 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,790 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,790 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,791 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName b 2024-12-10T00:27:08,791 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,792 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,792 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,793 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName c 2024-12-10T00:27:08,793 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:08,793 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:08,793 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,794 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,795 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,796 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,796 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,797 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:08,799 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,799 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 99718257719e4d368331920ea3932dde; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68165420, jitterRate=0.015743911266326904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:08,800 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:08,800 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 99718257719e4d368331920ea3932dde: Running coprocessor pre-open hook at 1733790428786Writing region info on filesystem at 1733790428787 (+1 ms)Initializing all the Stores at 1733790428788 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428788Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428788Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790428788Cleaning up temporary data from old regions at 1733790428796 (+8 ms)Running coprocessor post-open hooks at 1733790428800 (+4 ms)Region opened successfully at 1733790428800 2024-12-10T00:27:08,820 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 99718257719e4d368331920ea3932dde 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-10T00:27:08,821 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
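The WARN entries around this point ("Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=0" through "num=3", roughly one second apart) come from the test's CustomStoreFlusher throwing a simulated IOException, which the store answers by re-attempting the flush after a pause. A minimal sketch of that bounded retry-and-pause pattern, assuming a simplified flusher interface (all names hypothetical, not HBase's API), looks like this:

import java.io.IOException;

final class FlushRetrySketch {

    interface StoreFlusher {
        void flushSnapshot() throws IOException;   // assumed single-shot flush call
    }

    /** Retries the flush up to maxRetries extra times, pausing between attempts. */
    static boolean flushWithRetries(StoreFlusher flusher, int maxRetries, long pauseMillis)
            throws InterruptedException {
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                flusher.flushSnapshot();
                return true;                        // flush committed
            } catch (IOException e) {
                System.out.printf("Failed flushing store file, retrying num=%d: %s%n",
                        attempt, e.getMessage());
                Thread.sleep(pauseMillis);          // ~1 s gaps, as the timestamps suggest
            }
        }
        return false;                               // caller decides whether to abort
    }

    public static void main(String[] args) throws InterruptedException {
        // A flusher that always fails, mirroring the test's simulated exception.
        StoreFlusher alwaysFails = () -> { throw new IOException("Simulated exception by tests"); };
        boolean ok = flushWithRetries(alwaysFails, 3, 100);
        System.out.println("flush succeeded: " + ok);
    }
}

The retry count and pause length here are illustrative only; the log shows four attempts (num=0..3) spaced about a second apart before the test proceeds.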
2024-12-10T00:27:08,935 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-10T00:27:08,935 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:08,937 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-10T00:27:08,937 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:08,939 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-10T00:27:08,939 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:08,940 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-10T00:27:08,940 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:08,941 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-10T00:27:08,941 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:09,822 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:10,468 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T00:27:10,823 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T00:27:11,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741833_1009 (size=1407) 2024-12-10T00:27:11,824 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:12,824 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:13,825 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T00:27:14,826 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:15,828 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:16,828 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:17,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T00:27:17,829 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 99718257719e4d368331920ea3932dde/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
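The warnings above show the same simulated IOException being thrown on every flush attempt, with the retry counter climbing from 1 to 9 before the flush is finally abandoned. Below is a standalone sketch of that bounded-retry pattern; class and method names, the retry limit, and the numbering of the warning are illustrative assumptions, not HBase's actual HStore/StoreFlusher code.

```java
import java.io.IOException;

/**
 * Standalone sketch of the flush-retry behaviour visible in the warnings above:
 * the flusher keeps failing with the simulated IOException, the caller retries
 * a bounded number of times, then gives up and rethrows the last failure.
 */
public class FlushRetrySketch {

    /** Stand-in for the flusher that the test deliberately makes fail. */
    interface StoreFlusher {
        void flushSnapshot() throws IOException;
    }

    static void flushWithRetries(StoreFlusher flusher, int maxAttempts) throws IOException {
        IOException lastFailure = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                flusher.flushSnapshot();
                return; // flush succeeded
            } catch (IOException e) {
                lastFailure = e;
                // Mirrors "Failed flushing store file ..., retrying num=N" (numbering illustrative).
                System.out.println("Failed flushing store file, retrying num=" + attempt);
            }
        }
        throw lastFailure; // every attempt failed; surface the last exception to the caller
    }

    public static void main(String[] args) {
        StoreFlusher alwaysFails = () -> {
            throw new IOException("Simulated exception by tests");
        };
        try {
            flushWithRetries(alwaysFails, 10); // the log shows retry numbers climbing to 9
        } catch (IOException expected) {
            System.out.println("Expected simulated exception: " + expected.getMessage());
        }
    }
}
```

Running the sketch prints ten "retrying num=…" lines and then the expected "Simulated exception by tests" message, mirroring the shape of the excerpt above.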
2024-12-10T00:27:17,832 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 99718257719e4d368331920ea3932dde: 2024-12-10T00:27:17,832 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:17,849 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 99718257719e4d368331920ea3932dde: 2024-12-10T00:27:17,849 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 2024-12-10T00:27:17,849 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 99718257719e4d368331920ea3932dde, disabling compactions & flushes 2024-12-10T00:27:17,849 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:17,849 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:17,849 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. after waiting 0 ms 2024-12-10T00:27:17,849 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:17,850 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 2024-12-10T00:27:17,850 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 
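Worth noting before the split output that follows: the region is closed with 1190 bytes still sitting in the memstore (the aborted flush never persisted them), and the same 1190 bytes reappear once the WAL edits are replayed further down ("Flushing ... dataSize=1.16 KB" and "Finished flush of dataSize ~1.16 KB/1190"). A one-line, purely illustrative check of that unit conversion:

```java
public class MemstoreSizeCheck {
    public static void main(String[] args) {
        long memstoreBytes = 1190L; // "Memstore data size is 1190" at close; "~1.16 KB/1190" after replay
        System.out.printf("%.2f KB%n", memstoreBytes / 1024.0); // prints "1.16 KB"
    }
}
```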
2024-12-10T00:27:17,850 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 99718257719e4d368331920ea3932dde: Waiting for close lock at 1733790437849Running coprocessor pre-close hooks at 1733790437849Disabling compacts and flushes for region at 1733790437849Disabling writes for close at 1733790437849Writing region close event to WAL at 1733790437850 (+1 ms)Running coprocessor post-close hooks at 1733790437850Closed at 1733790437850 2024-12-10T00:27:17,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741903_1081 (size=2691) 2024-12-10T00:27:17,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741903_1081 (size=2691) 2024-12-10T00:27:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741903_1081 (size=2691) 2024-12-10T00:27:17,873 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, size=2.6 K (2691bytes) 2024-12-10T00:27:17,873 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 2024-12-10T00:27:17,874 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 after 1ms 2024-12-10T00:27:17,876 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:17,876 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 took 3ms 2024-12-10T00:27:17,879 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 so closing down 2024-12-10T00:27:17,879 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:17,880 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733790428682.temp 2024-12-10T00:27:17,881 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp 2024-12-10T00:27:17,881 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741904_1082 (size=2094) 2024-12-10T00:27:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741904_1082 (size=2094) 2024-12-10T00:27:17,890 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741904_1082 (size=2094) 2024-12-10T00:27:17,894 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:17,895 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 2024-12-10T00:27:17,895 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 18 ms; skipped=3; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-12-10T00:27:17,895 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, journal: Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, size=2.6 K (2691bytes) at 1733790437873Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 so closing down at 1733790437879 (+6 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp at 1733790437881 (+2 ms)3 split writer threads finished at 1733790437881Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733790437894 (+13 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000004-wal.1733790428682.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 at 1733790437895 (+1 ms)Processed 23 edits across 1 Regions in 18 ms; skipped=3; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1733790437895 2024-12-10T00:27:17,897 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790428682 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790428682 2024-12-10T00:27:17,898 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 2024-12-10T00:27:17,898 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 
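The split above opens the 2.6 K WAL, reads 23 edits, skips 3 and writes the remaining 20 into a recovered-edits file under the region directory; the temp file name carries the first sequence id written (4) and the file is then renamed to the highest sequence id it covers (26). A small snippet reproducing that naming; the 19-digit zero-padded format string is an assumption inferred from the file names in the log, not taken from HBase source:

```java
/**
 * Reproduces the recovered-edits file naming seen in the split output above.
 * The 19-digit zero padding is inferred from the log, not from HBase code.
 */
public class RecoveredEditsNameSketch {
    public static void main(String[] args) {
        long firstSeqIdWritten = 4L;  // first edit sequence id the split writer emits
        long maxSeqIdInLog = 26L;     // highest sequence id covered by the split
        String walName = "wal.1733790428682";

        String tempName = String.format("%019d-%s.temp", firstSeqIdWritten, walName);
        String finalName = String.format("%019d", maxSeqIdInLog);

        System.out.println(tempName);  // 0000000000000000004-wal.1733790428682.temp
        System.out.println(finalName); // 0000000000000000026
    }
}
```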
2024-12-10T00:27:17,900 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:17,912 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790437900, exclude list is [], retry=0 2024-12-10T00:27:17,915 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:17,915 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:17,916 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:17,917 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790437900 2024-12-10T00:27:17,917 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:17,918 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 99718257719e4d368331920ea3932dde, NAME => 'testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:17,918 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,918 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:17,919 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,919 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,921 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,922 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName a 2024-12-10T00:27:17,922 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:17,923 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:17,923 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,924 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName b 2024-12-10T00:27:17,924 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:17,925 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:17,925 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,926 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99718257719e4d368331920ea3932dde columnFamilyName c 2024-12-10T00:27:17,926 DEBUG [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:17,927 INFO [StoreOpener-99718257719e4d368331920ea3932dde-1 {}] regionserver.HStore(327): Store=99718257719e4d368331920ea3932dde/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:17,927 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,928 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,929 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde 2024-12-10T00:27:17,930 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 2024-12-10T00:27:17,933 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:17,935 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 2024-12-10T00:27:17,935 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 99718257719e4d368331920ea3932dde 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-10T00:27:17,955 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/a/ce3a1574da2c47d4aeffb58ae7c4ec3d is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733790437841/Put/seqid=0 2024-12-10T00:27:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741906_1084 (size=5523) 2024-12-10T00:27:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741906_1084 (size=5523) 2024-12-10T00:27:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741906_1084 (size=5523) 2024-12-10T00:27:17,968 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 
(bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/a/ce3a1574da2c47d4aeffb58ae7c4ec3d 2024-12-10T00:27:17,989 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/b/fb075e9e08be4341961f321086a01b95 is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733790437833/Put/seqid=0 2024-12-10T00:27:17,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741907_1085 (size=5524) 2024-12-10T00:27:17,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741907_1085 (size=5524) 2024-12-10T00:27:17,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741907_1085 (size=5524) 2024-12-10T00:27:17,997 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/b/fb075e9e08be4341961f321086a01b95 2024-12-10T00:27:18,015 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/c/02b7fb3482834424aa8aaa353e333db6 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733790437839/Put/seqid=0 2024-12-10T00:27:18,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741908_1086 (size=5457) 2024-12-10T00:27:18,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741908_1086 (size=5457) 2024-12-10T00:27:18,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741908_1086 (size=5457) 2024-12-10T00:27:18,022 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/c/02b7fb3482834424aa8aaa353e333db6 2024-12-10T00:27:18,028 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/a/ce3a1574da2c47d4aeffb58ae7c4ec3d as hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/a/ce3a1574da2c47d4aeffb58ae7c4ec3d 2024-12-10T00:27:18,033 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/a/ce3a1574da2c47d4aeffb58ae7c4ec3d, entries=7, sequenceid=26, filesize=5.4 K 2024-12-10T00:27:18,034 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/b/fb075e9e08be4341961f321086a01b95 as 
hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/b/fb075e9e08be4341961f321086a01b95 2024-12-10T00:27:18,040 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/b/fb075e9e08be4341961f321086a01b95, entries=7, sequenceid=26, filesize=5.4 K 2024-12-10T00:27:18,041 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/.tmp/c/02b7fb3482834424aa8aaa353e333db6 as hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/c/02b7fb3482834424aa8aaa353e333db6 2024-12-10T00:27:18,046 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/c/02b7fb3482834424aa8aaa353e333db6, entries=6, sequenceid=26, filesize=5.3 K 2024-12-10T00:27:18,046 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 99718257719e4d368331920ea3932dde in 111ms, sequenceid=26, compaction requested=false; wal=null 2024-12-10T00:27:18,047 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/0000000000000000026 2024-12-10T00:27:18,048 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:18,048 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:18,049 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
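The "42.7 M" figure in the FlushLargeStoresPolicy message above is the per-family lower bound obtained by dividing the region's memstore flush size by its three column families; the next log entry reports the same value in bytes (flushSizeLowerBound=44739242). A quick check of that arithmetic, assuming the default 128 MB memstore flush size (an assumption; the test configuration is not shown in this excerpt):

```java
public class FlushLowerBoundCheck {
    public static void main(String[] args) {
        // Assumption: default hbase.hregion.memstore.flush.size of 128 MB (134,217,728 bytes).
        long memstoreFlushSize = 128L * 1024 * 1024;
        int columnFamilies = 3; // families a, b and c in the test table
        long lowerBound = memstoreFlushSize / columnFamilies;
        // Matches flushSizeLowerBound=44739242 (about 42.7 MB) in the log entry that follows.
        System.out.println(lowerBound + " bytes = " + (lowerBound / (1024.0 * 1024.0)) + " MB");
    }
}
```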
2024-12-10T00:27:18,050 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:18,052 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsAfterAbortingFlush/99718257719e4d368331920ea3932dde/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-10T00:27:18,053 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 99718257719e4d368331920ea3932dde; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59420925, jitterRate=-0.11455921828746796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:18,053 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99718257719e4d368331920ea3932dde 2024-12-10T00:27:18,054 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 99718257719e4d368331920ea3932dde: Running coprocessor pre-open hook at 1733790437919Writing region info on filesystem at 1733790437919Initializing all the Stores at 1733790437920 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790437920Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790437920Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790437920Obtaining lock to block concurrent updates at 1733790437935 (+15 ms)Preparing flush snapshotting stores in 99718257719e4d368331920ea3932dde at 1733790437935Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733790437935Flushing stores of testReplayEditsAfterAbortingFlush,,1733790428613.99718257719e4d368331920ea3932dde. 
at 1733790437935Flushing 99718257719e4d368331920ea3932dde/a: creating writer at 1733790437935Flushing 99718257719e4d368331920ea3932dde/a: appending metadata at 1733790437955 (+20 ms)Flushing 99718257719e4d368331920ea3932dde/a: closing flushed file at 1733790437955Flushing 99718257719e4d368331920ea3932dde/b: creating writer at 1733790437974 (+19 ms)Flushing 99718257719e4d368331920ea3932dde/b: appending metadata at 1733790437989 (+15 ms)Flushing 99718257719e4d368331920ea3932dde/b: closing flushed file at 1733790437989Flushing 99718257719e4d368331920ea3932dde/c: creating writer at 1733790438002 (+13 ms)Flushing 99718257719e4d368331920ea3932dde/c: appending metadata at 1733790438015 (+13 ms)Flushing 99718257719e4d368331920ea3932dde/c: closing flushed file at 1733790438015Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7595b94d: reopening flushed file at 1733790438027 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34b3b639: reopening flushed file at 1733790438033 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@db9c6f9: reopening flushed file at 1733790438040 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 99718257719e4d368331920ea3932dde in 111ms, sequenceid=26, compaction requested=false; wal=null at 1733790438046 (+6 ms)Cleaning up temporary data from old regions at 1733790438048 (+2 ms)Running coprocessor post-open hooks at 1733790438054 (+6 ms)Region opened successfully at 1733790438054 2024-12-10T00:27:18,073 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=413 (was 413), OpenFileDescriptor=1158 (was 1100) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 292), ProcessCount=11 (was 11), AvailableMemoryMB=7245 (was 7375) 2024-12-10T00:27:18,074 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1158 is superior to 1024 2024-12-10T00:27:18,085 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=413, OpenFileDescriptor=1158, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=7244 2024-12-10T00:27:18,085 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1158 is superior to 1024 2024-12-10T00:27:18,099 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:18,100 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:18,101 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:18,103 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-54002040, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-54002040, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:18,115 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-54002040/hregion-54002040.1733790438104, exclude list is [], retry=0 2024-12-10T00:27:18,118 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:18,118 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:18,118 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:18,120 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-54002040/hregion-54002040.1733790438104 2024-12-10T00:27:18,121 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:18,121 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 1e83fda40b73b3ebe9c0c889cb8fead9, NAME => 'testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:18,129 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741910_1088 (size=61) 2024-12-10T00:27:18,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741910_1088 (size=61) 2024-12-10T00:27:18,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741910_1088 (size=61) 2024-12-10T00:27:18,130 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,133 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,134 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e83fda40b73b3ebe9c0c889cb8fead9 columnFamilyName a 2024-12-10T00:27:18,135 DEBUG [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,135 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(327): Store=1e83fda40b73b3ebe9c0c889cb8fead9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,135 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,136 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,136 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,137 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,137 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,138 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,141 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:18,142 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1e83fda40b73b3ebe9c0c889cb8fead9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74287648, jitterRate=0.10697221755981445}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:27:18,142 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1e83fda40b73b3ebe9c0c889cb8fead9: Writing region info on filesystem at 1733790438130Initializing all the Stores at 1733790438131 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438131Cleaning up temporary data from old regions at 1733790438137 (+6 ms)Region opened successfully at 1733790438142 (+5 ms) 2024-12-10T00:27:18,142 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1e83fda40b73b3ebe9c0c889cb8fead9, disabling compactions & flushes 2024-12-10T00:27:18,142 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,142 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,142 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. after waiting 0 ms 2024-12-10T00:27:18,142 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,143 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 
2024-12-10T00:27:18,143 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1e83fda40b73b3ebe9c0c889cb8fead9: Waiting for close lock at 1733790438142Disabling compacts and flushes for region at 1733790438142Disabling writes for close at 1733790438142Writing region close event to WAL at 1733790438143 (+1 ms)Closed at 1733790438143 2024-12-10T00:27:18,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741909_1087 (size=95) 2024-12-10T00:27:18,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741909_1087 (size=95) 2024-12-10T00:27:18,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741909_1087 (size=95) 2024-12-10T00:27:18,149 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:18,150 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-54002040:(num 1733790438104) 2024-12-10T00:27:18,150 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:18,152 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:18,168 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, exclude list is [], retry=0 2024-12-10T00:27:18,171 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:18,171 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:18,171 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:18,173 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 2024-12-10T00:27:18,173 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:18,173 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e83fda40b73b3ebe9c0c889cb8fead9, NAME => 'testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:18,174 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,174 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,174 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,175 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,176 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e83fda40b73b3ebe9c0c889cb8fead9 columnFamilyName a 2024-12-10T00:27:18,176 DEBUG [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,177 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(327): Store=1e83fda40b73b3ebe9c0c889cb8fead9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,177 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,178 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,179 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,179 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,179 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,181 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,182 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1e83fda40b73b3ebe9c0c889cb8fead9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59694342, jitterRate=-0.11048498749732971}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:27:18,182 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
1e83fda40b73b3ebe9c0c889cb8fead9: Writing region info on filesystem at 1733790438174Initializing all the Stores at 1733790438175 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438175Cleaning up temporary data from old regions at 1733790438179 (+4 ms)Region opened successfully at 1733790438182 (+3 ms) 2024-12-10T00:27:18,190 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1e83fda40b73b3ebe9c0c889cb8fead9, disabling compactions & flushes 2024-12-10T00:27:18,190 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,190 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,190 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. after waiting 0 ms 2024-12-10T00:27:18,190 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,191 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 2024-12-10T00:27:18,191 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. 
2024-12-10T00:27:18,191 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1e83fda40b73b3ebe9c0c889cb8fead9: Waiting for close lock at 1733790438190Disabling compacts and flushes for region at 1733790438190Disabling writes for close at 1733790438190Writing region close event to WAL at 1733790438191 (+1 ms)Closed at 1733790438191 2024-12-10T00:27:18,194 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 not finished, retry = 0 2024-12-10T00:27:18,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741911_1089 (size=1050) 2024-12-10T00:27:18,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741911_1089 (size=1050) 2024-12-10T00:27:18,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741911_1089 (size=1050) 2024-12-10T00:27:18,314 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, size=1.0 K (1050bytes) 2024-12-10T00:27:18,314 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 2024-12-10T00:27:18,315 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 after 1ms 2024-12-10T00:27:18,317 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:18,317 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 took 3ms 2024-12-10T00:27:18,318 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 so closing down 2024-12-10T00:27:18,319 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:18,320 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733790438152.temp 2024-12-10T00:27:18,321 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp 2024-12-10T00:27:18,321 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741912_1090 (size=1050) 2024-12-10T00:27:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741912_1090 
(size=1050) 2024-12-10T00:27:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741912_1090 (size=1050) 2024-12-10T00:27:18,327 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:18,328 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp to hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 2024-12-10T00:27:18,328 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 11 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-12-10T00:27:18,328 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, journal: Splitting hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, size=1.0 K (1050bytes) at 1733790438314Finishing writing output for hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 so closing down at 1733790438319 (+5 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp at 1733790438321 (+2 ms)3 split writer threads finished at 1733790438321Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733790438327 (+6 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000003-wal.1733790438152.temp to hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 at 1733790438328 (+1 ms)Processed 10 edits across 1 Regions in 11 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1733790438328 2024-12-10T00:27:18,329 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438152 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790438152 2024-12-10T00:27:18,330 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 2024-12-10T00:27:18,333 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:18,631 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:18,633 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:18,643 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438633, exclude list is [], retry=0 2024-12-10T00:27:18,646 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:18,646 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:18,647 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:18,648 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438633 2024-12-10T00:27:18,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:18,648 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e83fda40b73b3ebe9c0c889cb8fead9, NAME => 'testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:18,649 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,649 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,649 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,653 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,655 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e83fda40b73b3ebe9c0c889cb8fead9 columnFamilyName a 2024-12-10T00:27:18,656 DEBUG [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,657 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(327): Store=1e83fda40b73b3ebe9c0c889cb8fead9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,657 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,658 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,660 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,661 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 2024-12-10T00:27:18,663 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:18,664 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 2024-12-10T00:27:18,664 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1e83fda40b73b3ebe9c0c889cb8fead9 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-10T00:27:18,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/.tmp/a/944cc94b48274c11949c52ffdb635b37 is 79, key is testDatalossWhenInputError/a:x0/1733790438182/Put/seqid=0 2024-12-10T00:27:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741914_1092 (size=5808) 2024-12-10T00:27:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741914_1092 (size=5808) 2024-12-10T00:27:18,693 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741914_1092 (size=5808) 2024-12-10T00:27:18,694 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/.tmp/a/944cc94b48274c11949c52ffdb635b37 2024-12-10T00:27:18,704 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/.tmp/a/944cc94b48274c11949c52ffdb635b37 as hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/a/944cc94b48274c11949c52ffdb635b37 2024-12-10T00:27:18,714 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/a/944cc94b48274c11949c52ffdb635b37, entries=10, sequenceid=12, filesize=5.7 K 2024-12-10T00:27:18,714 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 1e83fda40b73b3ebe9c0c889cb8fead9 in 50ms, sequenceid=12, compaction requested=false; wal=null 2024-12-10T00:27:18,715 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/0000000000000000012 2024-12-10T00:27:18,716 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,716 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,719 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,721 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-10T00:27:18,722 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1e83fda40b73b3ebe9c0c889cb8fead9; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59291953, jitterRate=-0.11648105084896088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:27:18,723 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1e83fda40b73b3ebe9c0c889cb8fead9: Writing region info on filesystem at 1733790438649Initializing all the Stores at 1733790438652 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438652Obtaining lock to block concurrent updates at 1733790438665 (+13 ms)Preparing flush snapshotting stores in 1e83fda40b73b3ebe9c0c889cb8fead9 at 1733790438665Finished memstore snapshotting testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9., syncing WAL and waiting on mvcc, flushsize=dataSize=750, 
getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733790438665Flushing stores of testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9. at 1733790438665Flushing 1e83fda40b73b3ebe9c0c889cb8fead9/a: creating writer at 1733790438665Flushing 1e83fda40b73b3ebe9c0c889cb8fead9/a: appending metadata at 1733790438686 (+21 ms)Flushing 1e83fda40b73b3ebe9c0c889cb8fead9/a: closing flushed file at 1733790438686Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61db772b: reopening flushed file at 1733790438702 (+16 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 1e83fda40b73b3ebe9c0c889cb8fead9 in 50ms, sequenceid=12, compaction requested=false; wal=null at 1733790438714 (+12 ms)Cleaning up temporary data from old regions at 1733790438716 (+2 ms)Region opened successfully at 1733790438723 (+7 ms) 2024-12-10T00:27:18,725 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e83fda40b73b3ebe9c0c889cb8fead9, NAME => 'testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:18,725 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733790438099.1e83fda40b73b3ebe9c0c889cb8fead9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,725 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,726 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,727 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,728 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e83fda40b73b3ebe9c0c889cb8fead9 columnFamilyName a 2024-12-10T00:27:18,728 DEBUG [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,733 DEBUG [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/a/944cc94b48274c11949c52ffdb635b37 2024-12-10T00:27:18,733 INFO [StoreOpener-1e83fda40b73b3ebe9c0c889cb8fead9-1 {}] regionserver.HStore(327): Store=1e83fda40b73b3ebe9c0c889cb8fead9/a, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,733 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,734 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,735 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,736 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,736 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,737 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1e83fda40b73b3ebe9c0c889cb8fead9 2024-12-10T00:27:18,740 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testDatalossWhenInputError/1e83fda40b73b3ebe9c0c889cb8fead9/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-10T00:27:18,740 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1e83fda40b73b3ebe9c0c889cb8fead9; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64508372, jitterRate=-0.03875035047531128}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T00:27:18,741 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1e83fda40b73b3ebe9c0c889cb8fead9: Writing region info on filesystem at 1733790438726Initializing all the Stores at 1733790438727 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438727Cleaning up temporary data from old regions at 1733790438736 (+9 ms)Region opened successfully at 1733790438741 (+5 ms) 2024-12-10T00:27:18,755 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=423 (was 413) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:56792 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:56700 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:41482 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: 
BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:38150 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:41528 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:38104 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1240 (was 1158) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=7232 (was 7244) 2024-12-10T00:27:18,755 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1240 is superior to 1024 2024-12-10T00:27:18,766 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=423, OpenFileDescriptor=1240, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=7232 2024-12-10T00:27:18,766 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1240 is superior to 1024 2024-12-10T00:27:18,782 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:18,784 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:18,785 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:18,787 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-72231219, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-72231219, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:18,800 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-72231219/hregion-72231219.1733790438788, exclude list is [], retry=0 2024-12-10T00:27:18,803 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:18,803 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:18,803 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:18,805 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-72231219/hregion-72231219.1733790438788 2024-12-10T00:27:18,805 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:18,805 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 69bc6dff07a46b2aaf7b3d57148d677f, NAME => 'testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:18,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741916_1094 (size=63) 2024-12-10T00:27:18,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741916_1094 (size=63) 2024-12-10T00:27:18,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741916_1094 (size=63) 2024-12-10T00:27:18,815 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,817 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,818 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName a 2024-12-10T00:27:18,818 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,819 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,819 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,820 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName b 2024-12-10T00:27:18,820 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,820 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,821 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,822 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName c 2024-12-10T00:27:18,822 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,822 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,823 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,823 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,824 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,825 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,825 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,825 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:18,826 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,829 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:18,829 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 69bc6dff07a46b2aaf7b3d57148d677f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69265416, jitterRate=0.03213512897491455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:18,830 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 69bc6dff07a46b2aaf7b3d57148d677f: Writing region info on filesystem at 1733790438816Initializing all the Stores at 1733790438816Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438816Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438817 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438817Cleaning up temporary data from old regions at 1733790438825 (+8 ms)Region opened successfully at 1733790438830 (+5 ms) 2024-12-10T00:27:18,830 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 69bc6dff07a46b2aaf7b3d57148d677f, disabling compactions & flushes 2024-12-10T00:27:18,830 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:18,830 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:18,830 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 
after waiting 0 ms 2024-12-10T00:27:18,830 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:18,830 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:18,831 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 69bc6dff07a46b2aaf7b3d57148d677f: Waiting for close lock at 1733790438830Disabling compacts and flushes for region at 1733790438830Disabling writes for close at 1733790438830Writing region close event to WAL at 1733790438830Closed at 1733790438830 2024-12-10T00:27:18,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741915_1093 (size=95) 2024-12-10T00:27:18,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741915_1093 (size=95) 2024-12-10T00:27:18,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741915_1093 (size=95) 2024-12-10T00:27:18,836 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:18,836 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-72231219:(num 1733790438788) 2024-12-10T00:27:18,836 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:18,838 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:18,850 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, exclude list is [], retry=0 2024-12-10T00:27:18,853 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:18,853 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:18,853 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:18,855 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 2024-12-10T00:27:18,856 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:18,856 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 69bc6dff07a46b2aaf7b3d57148d677f, NAME => 'testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:18,856 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:18,856 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,856 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,858 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,858 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName a 2024-12-10T00:27:18,858 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,859 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,859 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,859 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName b 2024-12-10T00:27:18,860 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,860 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,860 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,860 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName c 2024-12-10T00:27:18,861 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:18,861 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:18,861 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,862 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,863 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,864 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,864 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,864 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
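The FlushLargeStoresPolicy fallback logged just above (flushSizeLowerBound=44739242) is simply the region's memstore flush size split across the table's three column families: assuming the usual 128 MB default for hbase.hregion.memstore.flush.size, 134217728 / 3 = 44739242 bytes, i.e. roughly 42.7 MB, which matches both the "(42.7 M)" note and the value printed when the region opens. A one-line sketch of that arithmetic, under that default-configuration assumption:

    // Assumes hbase.hregion.memstore.flush.size is left at its 128 MB default.
    long memstoreFlushSize = 128L * 1024 * 1024;             // 134217728 bytes
    int families = 3;                                        // 'a', 'b', 'c' in this test table
    long flushSizeLowerBound = memstoreFlushSize / families; // 44739242 bytes ~= 42.7 MB
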
2024-12-10T00:27:18,866 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:18,866 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 69bc6dff07a46b2aaf7b3d57148d677f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62429508, jitterRate=-0.0697278380393982}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:18,867 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 69bc6dff07a46b2aaf7b3d57148d677f: Writing region info on filesystem at 1733790438856Initializing all the Stores at 1733790438857 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438857Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438857Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790438857Cleaning up temporary data from old regions at 1733790438864 (+7 ms)Region opened successfully at 1733790438867 (+3 ms) 2024-12-10T00:27:18,871 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733790438871/Put/seqid=0 2024-12-10T00:27:18,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741918_1096 (size=4875) 2024-12-10T00:27:18,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741918_1096 (size=4875) 2024-12-10T00:27:18,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741918_1096 (size=4875) 2024-12-10T00:27:18,879 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733790438879/Put/seqid=0 2024-12-10T00:27:18,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741919_1097 (size=4875) 2024-12-10T00:27:18,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741919_1097 (size=4875) 2024-12-10T00:27:18,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741919_1097 (size=4875) 2024-12-10T00:27:18,888 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733790438888/Put/seqid=0 2024-12-10T00:27:18,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741920_1098 (size=4875) 2024-12-10T00:27:18,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741920_1098 (size=4875) 2024-12-10T00:27:18,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741920_1098 (size=4875) 2024-12-10T00:27:18,896 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,900 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-10T00:27:18,900 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T00:27:18,901 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,904 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-10T00:27:18,904 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T00:27:18,904 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,907 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-10T00:27:18,907 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T00:27:18,908 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 69bc6dff07a46b2aaf7b3d57148d677f 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-10T00:27:18,921 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp/a/17b672753cb74a9f95ed33b2b8274c8a is 55, key is testCompactedBulkLoadedFiles/a:a/1733790438867/Put/seqid=0 2024-12-10T00:27:18,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741921_1099 (size=5107) 2024-12-10T00:27:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741921_1099 (size=5107) 2024-12-10T00:27:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741921_1099 (size=5107) 2024-12-10T00:27:18,928 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp/a/17b672753cb74a9f95ed33b2b8274c8a 2024-12-10T00:27:18,934 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp/a/17b672753cb74a9f95ed33b2b8274c8a as 
hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a 2024-12-10T00:27:18,935 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-10T00:27:18,935 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:18,936 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-10T00:27:18,936 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-10T00:27:18,939 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a, entries=1, sequenceid=4, filesize=5.0 K 2024-12-10T00:27:18,940 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 69bc6dff07a46b2aaf7b3d57148d677f in 32ms, sequenceid=4, compaction requested=false 2024-12-10T00:27:18,940 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 69bc6dff07a46b2aaf7b3d57148d677f: 2024-12-10T00:27:18,942 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ 2024-12-10T00:27:18,943 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ 2024-12-10T00:27:18,944 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ 2024-12-10T00:27:18,944 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile0 into 69bc6dff07a46b2aaf7b3d57148d677f/a as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ - updating store file list. 
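The hfile0-hfile2 files committed above are standalone HFiles written under /hbase/testCompactedBulkLoadedFiles and then handed to family 'a' of the open region as a bulk load. A minimal, hedged sketch of those two steps follows; HFile.getWriterFactoryNoCache, HRegion.bulkLoadHFiles and Pair are real HBase APIs whose exact signatures vary between releases, and the `region` handle plus the row loop are illustrative assumptions, not this test's actual code.

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    static void writeAndBulkLoad(Configuration conf, HRegion region) throws IOException {
      FileSystem fs = FileSystem.get(conf);
      Path hfile = new Path("/hbase/testCompactedBulkLoadedFiles/hfile0"); // path as seen in the log
      HFileContext ctx = new HFileContextBuilder().withBlockSize(64 * 1024).build();
      byte[] family = Bytes.toBytes("a");
      // Step 1: write a small standalone HFile (rows in the 000..050 range reported by the validation step).
      try (HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
          .withPath(fs, hfile).withFileContext(ctx).create()) {
        for (int i = 0; i <= 50; i += 5) {
          byte[] row = Bytes.toBytes(String.format("%03d", i));
          writer.append(new KeyValue(row, family, family, System.currentTimeMillis(), row));
        }
      }
      // Step 2: hand the finished file to the region; assignSeqId=true produces the _SeqId_N_ rename seen above.
      region.bulkLoadHFiles(
          Collections.singletonList(Pair.newPair(family, hfile.toString())), true, null);
    }
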
2024-12-10T00:27:18,949 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:18,949 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ into 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,949 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile0 into 69bc6dff07a46b2aaf7b3d57148d677f/a (new location: hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_) 2024-12-10T00:27:18,950 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile1 into 69bc6dff07a46b2aaf7b3d57148d677f/a as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ - updating store file list. 2024-12-10T00:27:18,955 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for ff5bcb9304564be7b52631e96b09b12c_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:18,955 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ into 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,955 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile1 into 69bc6dff07a46b2aaf7b3d57148d677f/a (new location: hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_) 2024-12-10T00:27:18,957 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile2 into 69bc6dff07a46b2aaf7b3d57148d677f/a as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ - updating store file list. 
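The repeated "HFile Bloom filter type ... NONE, but ROW specified in column family configuration" messages are expected for files produced this way: a raw HFile.Writer emits no bloom metadata, while family 'a' declares BLOOMFILTER => 'ROW'. When matching blooms are wanted, bulk-load inputs are usually written through the server-internal StoreFileWriter instead; a hedged sketch using builder methods as found in recent HBase versions, not this test's code:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
    import org.apache.hadoop.hbase.util.Bytes;

    static void writeWithRowBloom(Configuration conf, FileSystem fs, Path path) throws IOException {
      StoreFileWriter writer = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
          .withFilePath(path)
          .withBloomType(BloomType.ROW)   // matches BLOOMFILTER => 'ROW' in the family descriptor
          .withFileContext(new HFileContextBuilder().withBlockSize(64 * 1024).build())
          .build();
      try {
        byte[] row = Bytes.toBytes("000");
        writer.append(new KeyValue(row, Bytes.toBytes("a"), Bytes.toBytes("a"),
            System.currentTimeMillis(), row));
      } finally {
        writer.close();
      }
    }
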
2024-12-10T00:27:18,961 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5935dd25e0574bdba1558652380e134f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:18,961 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ into 69bc6dff07a46b2aaf7b3d57148d677f/a 2024-12-10T00:27:18,961 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34093/hbase/testCompactedBulkLoadedFiles/hfile2 into 69bc6dff07a46b2aaf7b3d57148d677f/a (new location: hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_) 2024-12-10T00:27:18,969 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T00:27:18,969 DEBUG [Time-limited test {}] regionserver.HStore(1541): 69bc6dff07a46b2aaf7b3d57148d677f/a is initiating major compaction (all files) 2024-12-10T00:27:18,969 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 69bc6dff07a46b2aaf7b3d57148d677f/a in testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:18,969 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_] into tmpdir=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp, totalSize=19.3 K 2024-12-10T00:27:18,970 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 17b672753cb74a9f95ed33b2b8274c8a, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733790438867 2024-12-10T00:27:18,970 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T00:27:18,971 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ff5bcb9304564be7b52631e96b09b12c_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T00:27:18,971 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5935dd25e0574bdba1558652380e134f_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T00:27:18,983 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp/a/322f7be5fa1749bab0be52a38968da7a is 55, key is 
testCompactedBulkLoadedFiles/a:a/1733790438867/Put/seqid=0 2024-12-10T00:27:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741922_1100 (size=6154) 2024-12-10T00:27:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741922_1100 (size=6154) 2024-12-10T00:27:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741922_1100 (size=6154) 2024-12-10T00:27:19,402 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/.tmp/a/322f7be5fa1749bab0be52a38968da7a as hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/322f7be5fa1749bab0be52a38968da7a 2024-12-10T00:27:19,409 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 69bc6dff07a46b2aaf7b3d57148d677f/a of 69bc6dff07a46b2aaf7b3d57148d677f into 322f7be5fa1749bab0be52a38968da7a(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T00:27:19,409 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 69bc6dff07a46b2aaf7b3d57148d677f: 2024-12-10T00:27:19,409 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-10T00:27:19,409 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-10T00:27:19,436 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, size=0 (0bytes) 2024-12-10T00:27:19,436 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 might be still open, length is 0 2024-12-10T00:27:19,436 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 2024-12-10T00:27:19,437 WARN [IPC Server handler 3 on default port 34093 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095 2024-12-10T00:27:19,437 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 after 1ms 2024-12-10T00:27:20,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:56830 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:35811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56830 dst: /127.0.0.1:35811 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35811 remote=/127.0.0.1:56830]. 
Total timeout mills is 60000, 58767 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:20,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:41570 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:34693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41570 dst: /127.0.0.1:34693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:20,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:38174 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:37237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38174 dst: /127.0.0.1:37237 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:20,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741917_1101 (size=1172) 2024-12-10T00:27:20,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741917_1101 (size=1172) 2024-12-10T00:27:20,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741917_1101 (size=1172) 2024-12-10T00:27:23,374 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T00:27:23,438 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 after 4002ms 2024-12-10T00:27:23,440 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:23,440 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 took 4004ms 2024-12-10T00:27:23,442 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838; continuing. 
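The four-second gap between "Failed to recover lease, attempt=0" and "Recovered lease, attempt=1 ... after 4002ms" is HDFS lease recovery on a WAL that was never cleanly closed; the interleaved DataXceiver "Interrupted while waiting for IO" and "Premature EOF" errors are just the write pipeline being torn down so that recovery can finalize the last block. HBase drives this through RecoverLeaseFSUtils; below is a simplified, hedged sketch of the underlying HDFS client calls it builds on (recoverLease and isFileClosed do exist on DistributedFileSystem), with an illustrative timeout and fixed sleep rather than HBase's actual backoff policy.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    static void recoverWalLease(DistributedFileSystem dfs, Path wal)
        throws IOException, InterruptedException {
      // recoverLease() asks the NameNode to start block recovery; true means the file is already closed.
      boolean closed = dfs.recoverLease(wal);
      long deadline = System.currentTimeMillis() + 60_000;    // illustrative 60 s budget
      while (!closed && System.currentTimeMillis() < deadline) {
        Thread.sleep(1_000);                                  // HBase uses a growing pause, not a fixed one
        closed = dfs.isFileClosed(wal);                       // poll until the last block is finalized
      }
      if (!closed) {
        throw new IOException("Could not recover lease on " + wal);
      }
    }
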
2024-12-10T00:27:23,442 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 so closing down 2024-12-10T00:27:23,442 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:23,444 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733790438838.temp 2024-12-10T00:27:23,445 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp 2024-12-10T00:27:23,446 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741923_1102 (size=548) 2024-12-10T00:27:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741923_1102 (size=548) 2024-12-10T00:27:23,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741923_1102 (size=548) 2024-12-10T00:27:23,454 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:23,455 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp to hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 2024-12-10T00:27:23,456 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 16 ms; skipped=3; WAL=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T00:27:23,456 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, journal: Splitting hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, size=0 (0bytes) at 1733790439436Finishing writing output for hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 so closing down at 1733790443442 (+4006 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp at 1733790443445 (+3 ms)3 split writer threads finished at 1733790443446 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp (wrote 2 edits, 
skipped 0 edits in 0 ms) at 1733790443454 (+8 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000003-wal.1733790438838.temp to hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 at 1733790443456 (+2 ms)Processed 5 edits across 1 Regions in 16 ms; skipped=3; WAL=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838, size=0, length=0, corrupted=false, cancelled=false at 1733790443456 2024-12-10T00:27:23,458 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790438838 2024-12-10T00:27:23,459 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 2024-12-10T00:27:23,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:23,462 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:23,473 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790443462, exclude list is [], retry=0 2024-12-10T00:27:23,475 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:23,476 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:23,476 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:23,477 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790443462 2024-12-10T00:27:23,478 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:23,478 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 69bc6dff07a46b2aaf7b3d57148d677f, NAME => 'testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:23,478 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:23,478 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,478 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,480 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,480 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName a 2024-12-10T00:27:23,480 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,486 DEBUG [StoreFileOpener-69bc6dff07a46b2aaf7b3d57148d677f-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:23,486 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ 2024-12-10T00:27:23,490 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a 2024-12-10T00:27:23,494 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/322f7be5fa1749bab0be52a38968da7a 2024-12-10T00:27:23,497 DEBUG [StoreFileOpener-69bc6dff07a46b2aaf7b3d57148d677f-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5935dd25e0574bdba1558652380e134f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T00:27:23,497 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ 2024-12-10T00:27:23,500 DEBUG [StoreFileOpener-69bc6dff07a46b2aaf7b3d57148d677f-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for ff5bcb9304564be7b52631e96b09b12c_SeqId_4_: NONE, but ROW specified in column family 
configuration 2024-12-10T00:27:23,500 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ 2024-12-10T00:27:23,500 WARN [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@337d072d 2024-12-10T00:27:23,500 WARN [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@337d072d 2024-12-10T00:27:23,500 WARN [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@337d072d 2024-12-10T00:27:23,501 WARN [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@337d072d 2024-12-10T00:27:23,501 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_] to archive 2024-12-10T00:27:23,502 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T00:27:23,503 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ to hdfs://localhost:34093/hbase/archive/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_ 2024-12-10T00:27:23,503 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a to hdfs://localhost:34093/hbase/archive/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/17b672753cb74a9f95ed33b2b8274c8a 2024-12-10T00:27:23,504 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ to hdfs://localhost:34093/hbase/archive/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/5935dd25e0574bdba1558652380e134f_SeqId_4_ 2024-12-10T00:27:23,504 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ to hdfs://localhost:34093/hbase/archive/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/a/ff5bcb9304564be7b52631e96b09b12c_SeqId_4_ 2024-12-10T00:27:23,504 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,504 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,505 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName b 2024-12-10T00:27:23,505 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,505 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/b, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,505 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,506 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69bc6dff07a46b2aaf7b3d57148d677f columnFamilyName c 2024-12-10T00:27:23,506 DEBUG [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,507 INFO [StoreOpener-69bc6dff07a46b2aaf7b3d57148d677f-1 {}] regionserver.HStore(327): Store=69bc6dff07a46b2aaf7b3d57148d677f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,507 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,507 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,509 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,509 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 2024-12-10T00:27:23,511 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:23,513 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 69bc6dff07a46b2aaf7b3d57148d677f : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "69bc6dff07a46b2aaf7b3d57148d677f" family_name: "a" compaction_input: "17b672753cb74a9f95ed33b2b8274c8a" compaction_input: "0c4317a11e1e42dfa65aa4dfdeb807e7_SeqId_4_" compaction_input: "ff5bcb9304564be7b52631e96b09b12c_SeqId_4_" compaction_input: "5935dd25e0574bdba1558652380e134f_SeqId_4_" compaction_output: "322f7be5fa1749bab0be52a38968da7a" store_home_dir: "a" region_name: 
"testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-10T00:27:23,513 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-10T00:27:23,513 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 2024-12-10T00:27:23,514 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/0000000000000000008 2024-12-10T00:27:23,515 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,515 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,515 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:23,517 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 69bc6dff07a46b2aaf7b3d57148d677f 2024-12-10T00:27:23,519 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testCompactedBulkLoadedFiles/69bc6dff07a46b2aaf7b3d57148d677f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T00:27:23,520 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 69bc6dff07a46b2aaf7b3d57148d677f; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70129094, jitterRate=0.04500493407249451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:23,520 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 69bc6dff07a46b2aaf7b3d57148d677f: Writing region info on filesystem at 1733790443478Initializing all the Stores at 1733790443479 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443479Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443479Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443479Cleaning up temporary data from old regions at 1733790443515 (+36 ms)Region opened successfully at 
1733790443520 (+5 ms) 2024-12-10T00:27:23,522 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 69bc6dff07a46b2aaf7b3d57148d677f, disabling compactions & flushes 2024-12-10T00:27:23,522 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:23,522 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:23,522 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. after waiting 0 ms 2024-12-10T00:27:23,522 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:23,523 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733790438783.69bc6dff07a46b2aaf7b3d57148d677f. 2024-12-10T00:27:23,523 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 69bc6dff07a46b2aaf7b3d57148d677f: Waiting for close lock at 1733790443522Disabling compacts and flushes for region at 1733790443522Disabling writes for close at 1733790443522Writing region close event to WAL at 1733790443523 (+1 ms)Closed at 1733790443523 2024-12-10T00:27:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741924_1103 (size=95) 2024-12-10T00:27:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741924_1103 (size=95) 2024-12-10T00:27:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741924_1103 (size=95) 2024-12-10T00:27:23,527 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:23,527 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733790443462) 2024-12-10T00:27:23,538 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=441 (was 423) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1282559765_22 at /127.0.0.1:58822 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:34093 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:34093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1282559765_22 at /127.0.0.1:38266 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1282559765_22 at /127.0.0.1:37610 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1324 (was 1240) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=255 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=7094 (was 7232) 2024-12-10T00:27:23,539 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1324 is superior to 1024 2024-12-10T00:27:23,549 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=441, OpenFileDescriptor=1324, MaxFileDescriptor=1048576, SystemLoadAverage=255, ProcessCount=11, AvailableMemoryMB=7093 2024-12-10T00:27:23,549 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1324 is superior to 1024 2024-12-10T00:27:23,562 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:23,563 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T00:27:23,564 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T00:27:23,566 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-30214169, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/hregion-30214169, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:23,577 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-30214169/hregion-30214169.1733790443566, exclude list is [], retry=0 2024-12-10T00:27:23,579 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:23,579 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:23,579 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:23,581 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-30214169/hregion-30214169.1733790443566 2024-12-10T00:27:23,581 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:41935:41935)] 2024-12-10T00:27:23,581 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2b95f19da34c0fd50f86225b2fe91763, NAME => 'testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34093/hbase 2024-12-10T00:27:23,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741926_1105 (size=67) 2024-12-10T00:27:23,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741926_1105 (size=67) 2024-12-10T00:27:23,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741926_1105 (size=67) 2024-12-10T00:27:23,589 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:23,591 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,593 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName a 2024-12-10T00:27:23,593 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,593 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,593 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,595 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName b 2024-12-10T00:27:23,595 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,596 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,596 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,597 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName c 2024-12-10T00:27:23,597 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,598 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,598 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,599 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,599 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,601 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,601 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,601 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:23,602 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,604 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T00:27:23,605 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2b95f19da34c0fd50f86225b2fe91763; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69493460, jitterRate=0.035533249378204346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2b95f19da34c0fd50f86225b2fe91763: Writing region info on filesystem at 1733790443589Initializing all the Stores at 1733790443590 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443590Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443591 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443591Cleaning up temporary data from old regions at 1733790443601 (+10 ms)Region opened successfully at 1733790443605 (+4 ms) 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2b95f19da34c0fd50f86225b2fe91763, disabling compactions & flushes 2024-12-10T00:27:23,605 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 
after waiting 0 ms 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,605 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,605 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2b95f19da34c0fd50f86225b2fe91763: Waiting for close lock at 1733790443605Disabling compacts and flushes for region at 1733790443605Disabling writes for close at 1733790443605Writing region close event to WAL at 1733790443605Closed at 1733790443605 2024-12-10T00:27:23,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741925_1104 (size=95) 2024-12-10T00:27:23,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741925_1104 (size=95) 2024-12-10T00:27:23,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741925_1104 (size=95) 2024-12-10T00:27:23,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:23,613 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-30214169:(num 1733790443566) 2024-12-10T00:27:23,613 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:23,616 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:23,633 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, exclude list is [], retry=0 2024-12-10T00:27:23,635 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:23,636 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:23,636 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:23,637 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 2024-12-10T00:27:23,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533),(127.0.0.1/127.0.0.1:39943:39943)] 2024-12-10T00:27:23,637 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 2b95f19da34c0fd50f86225b2fe91763, NAME => 'testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:23,638 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:23,638 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,638 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,639 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,640 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName a 2024-12-10T00:27:23,640 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,640 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,641 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,641 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
2b95f19da34c0fd50f86225b2fe91763 columnFamilyName b 2024-12-10T00:27:23,642 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,642 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,642 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,643 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName c 2024-12-10T00:27:23,643 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,643 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,643 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,644 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,645 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,646 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,646 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,646 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-10T00:27:23,648 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,649 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2b95f19da34c0fd50f86225b2fe91763; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62870715, jitterRate=-0.06315334141254425}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:23,649 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2b95f19da34c0fd50f86225b2fe91763: Writing region info on filesystem at 1733790443638Initializing all the Stores at 1733790443639 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443639Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443639Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443639Cleaning up temporary data from old regions at 1733790443646 (+7 ms)Region opened successfully at 1733790443649 (+3 ms) 2024-12-10T00:27:23,656 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2b95f19da34c0fd50f86225b2fe91763 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-10T00:27:23,670 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/8fffecbb83084c7c8ea6e60fc2812ae9 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733790443649/Put/seqid=0 2024-12-10T00:27:23,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741928_1107 (size=5958) 2024-12-10T00:27:23,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741928_1107 (size=5958) 2024-12-10T00:27:23,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741928_1107 (size=5958) 2024-12-10T00:27:23,677 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/8fffecbb83084c7c8ea6e60fc2812ae9 2024-12-10T00:27:23,683 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/8fffecbb83084c7c8ea6e60fc2812ae9 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/8fffecbb83084c7c8ea6e60fc2812ae9 2024-12-10T00:27:23,688 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/8fffecbb83084c7c8ea6e60fc2812ae9, entries=10, sequenceid=13, filesize=5.8 K 2024-12-10T00:27:23,690 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 2b95f19da34c0fd50f86225b2fe91763 in 33ms, sequenceid=13, compaction requested=false 2024-12-10T00:27:23,690 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2b95f19da34c0fd50f86225b2fe91763: 2024-12-10T00:27:23,703 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2b95f19da34c0fd50f86225b2fe91763, disabling compactions & flushes 2024-12-10T00:27:23,703 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,703 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,703 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. after waiting 0 ms 2024-12-10T00:27:23,703 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,704 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:23,704 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 
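The flush above follows a temp-then-commit pattern: the HFile 8fffecbb83084c7c8ea6e60fc2812ae9 is written under the region's .tmp directory and only afterwards renamed into the a/ store directory, so readers never see a partially written file. The ERROR about 1740 bytes still in the memstore at close is consistent with the test intentionally leaving the edits written after the flush unflushed, so that the subsequent WAL split and replay have something to recover. Below is a minimal sketch of the same write-to-temp-then-rename idea against a Hadoop FileSystem; the paths and file contents are illustrative, not the region's actual commit logic.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Illustrative layout: the region writes to <region>/.tmp/<family>/<file> and then
    // renames into <region>/<family>/<file>, as in the "Committing"/"Added" lines above.
    Path tmp = new Path("/hbase-demo/region/.tmp/a/flushfile");
    Path dst = new Path("/hbase-demo/region/a/flushfile");

    // 1. Write the whole file under .tmp.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Commit by renaming; the file becomes visible in the store directory only now.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed for " + dst);
    }
  }
}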
2024-12-10T00:27:23,704 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2b95f19da34c0fd50f86225b2fe91763: Waiting for close lock at 1733790443703Disabling compacts and flushes for region at 1733790443703Disabling writes for close at 1733790443703Writing region close event to WAL at 1733790443704 (+1 ms)Closed at 1733790443704 2024-12-10T00:27:23,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741927_1106 (size=3345) 2024-12-10T00:27:23,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741927_1106 (size=3345) 2024-12-10T00:27:23,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741927_1106 (size=3345) 2024-12-10T00:27:23,720 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, size=3.3 K (3345bytes) 2024-12-10T00:27:23,720 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 2024-12-10T00:27:23,721 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 after 1ms 2024-12-10T00:27:23,723 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:23,723 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 took 3ms 2024-12-10T00:27:23,724 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 so closing down 2024-12-10T00:27:23,724 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:23,725 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733790443616.temp 2024-12-10T00:27:23,726 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp 2024-12-10T00:27:23,726 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741929_1108 (size=2944) 2024-12-10T00:27:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741929_1108 (size=2944) 2024-12-10T00:27:23,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741929_1108 
(size=2944) 2024-12-10T00:27:23,732 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:23,733 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 2024-12-10T00:27:23,733 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 10 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, size=3.3 K, length=3345, corrupted=false, cancelled=false 2024-12-10T00:27:23,733 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, journal: Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, size=3.3 K (3345bytes) at 1733790443720Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 so closing down at 1733790443724 (+4 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp at 1733790443726 (+2 ms)3 split writer threads finished at 1733790443726Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733790443732 (+6 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000003-wal.1733790443616.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 at 1733790443733 (+1 ms)Processed 32 edits across 1 Regions in 10 ms; skipped=2; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616, size=3.3 K, length=3345, corrupted=false, cancelled=false at 1733790443733 2024-12-10T00:27:23,735 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443616 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790443616 2024-12-10T00:27:23,736 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 2024-12-10T00:27:23,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:23,738 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:23,749 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, exclude list is [], retry=0 2024-12-10T00:27:23,751 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:23,751 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:23,752 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:23,753 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 2024-12-10T00:27:23,753 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:23,753 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2b95f19da34c0fd50f86225b2fe91763, NAME => 'testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.', STARTKEY => '', ENDKEY => ''} 2024-12-10T00:27:23,753 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:23,753 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,753 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,754 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,755 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName a 2024-12-10T00:27:23,755 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,760 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/8fffecbb83084c7c8ea6e60fc2812ae9 2024-12-10T00:27:23,760 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,760 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,761 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName b 2024-12-10T00:27:23,761 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,761 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,761 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,762 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName c 2024-12-10T00:27:23,762 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:23,762 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:23,762 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,763 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,764 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,764 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 2024-12-10T00:27:23,766 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:23,767 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 2024-12-10T00:27:23,767 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2b95f19da34c0fd50f86225b2fe91763 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-10T00:27:23,782 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/bc38c8c30c2d4c30b12dd3177d47a491 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733790443690/Put/seqid=0 2024-12-10T00:27:23,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741931_1110 (size=5958) 2024-12-10T00:27:23,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741931_1110 (size=5958) 2024-12-10T00:27:23,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741931_1110 (size=5958) 2024-12-10T00:27:23,788 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/bc38c8c30c2d4c30b12dd3177d47a491 2024-12-10T00:27:23,811 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/594b09631b3745108d065678f4504924 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733790443695/Put/seqid=0 2024-12-10T00:27:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741932_1111 (size=5958) 2024-12-10T00:27:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741932_1111 (size=5958) 2024-12-10T00:27:23,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741932_1111 (size=5958) 2024-12-10T00:27:23,817 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/594b09631b3745108d065678f4504924 2024-12-10T00:27:23,823 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/bc38c8c30c2d4c30b12dd3177d47a491 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/b/bc38c8c30c2d4c30b12dd3177d47a491 2024-12-10T00:27:23,827 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/b/bc38c8c30c2d4c30b12dd3177d47a491, entries=10, sequenceid=35, filesize=5.8 K 2024-12-10T00:27:23,828 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/594b09631b3745108d065678f4504924 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/c/594b09631b3745108d065678f4504924 2024-12-10T00:27:23,832 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/c/594b09631b3745108d065678f4504924, entries=10, sequenceid=35, filesize=5.8 K 2024-12-10T00:27:23,832 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 2b95f19da34c0fd50f86225b2fe91763 in 65ms, sequenceid=35, compaction requested=false; wal=null 2024-12-10T00:27:23,833 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000035 2024-12-10T00:27:23,834 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,834 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,834 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:23,835 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:23,837 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-10T00:27:23,838 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2b95f19da34c0fd50f86225b2fe91763; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70818108, jitterRate=0.055272042751312256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:23,838 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2b95f19da34c0fd50f86225b2fe91763: Writing region info on filesystem at 1733790443753Initializing all the Stores at 1733790443754 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443754Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443754Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790443754Obtaining lock to block concurrent updates at 1733790443767 (+13 ms)Preparing flush snapshotting stores in 2b95f19da34c0fd50f86225b2fe91763 at 1733790443767Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733790443767Flushing stores of testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 
at 1733790443767Flushing 2b95f19da34c0fd50f86225b2fe91763/b: creating writer at 1733790443767Flushing 2b95f19da34c0fd50f86225b2fe91763/b: appending metadata at 1733790443781 (+14 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/b: closing flushed file at 1733790443781Flushing 2b95f19da34c0fd50f86225b2fe91763/c: creating writer at 1733790443793 (+12 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/c: appending metadata at 1733790443810 (+17 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/c: closing flushed file at 1733790443810Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dd829c9: reopening flushed file at 1733790443822 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d42d9b0: reopening flushed file at 1733790443827 (+5 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 2b95f19da34c0fd50f86225b2fe91763 in 65ms, sequenceid=35, compaction requested=false; wal=null at 1733790443832 (+5 ms)Cleaning up temporary data from old regions at 1733790443834 (+2 ms)Region opened successfully at 1733790443838 (+4 ms) 2024-12-10T00:27:23,895 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, size=0 (0bytes) 2024-12-10T00:27:23,895 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 might be still open, length is 0 2024-12-10T00:27:23,895 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 2024-12-10T00:27:23,895 WARN [IPC Server handler 2 on default port 34093 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-12-10T00:27:23,895 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 after 0ms 2024-12-10T00:27:26,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:38322 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:37237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38322 dst: /127.0.0.1:37237 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37237 remote=/127.0.0.1:38322]. Total timeout mills is 60000, 57220 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:26,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:58868 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:34693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58868 dst: /127.0.0.1:34693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:26,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1400862134_22 at /127.0.0.1:37670 [Receiving block BP-313768122-172.17.0.2-1733790403195:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:35811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37670 dst: /127.0.0.1:35811 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:26,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741930_1112 (size=2936) 2024-12-10T00:27:26,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741930_1112 (size=2936) 2024-12-10T00:27:27,896 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 after 4001ms 2024-12-10T00:27:27,902 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:27,902 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 took 4008ms 2024-12-10T00:27:27,905 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738; continuing. 
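Before the second WAL (wal.1733790443738, reported with size 0 because its writer never closed it) can be split, its HDFS lease must be recovered so the NameNode finalizes the file length: attempt 0 fails while lease recovery is still in progress, the write pipeline is torn down (the DataXceiver errors above), and attempt 1 succeeds roughly four seconds later, after which the splitter reads to EOF and continues. A minimal retry sketch over DistributedFileSystem.recoverLease, the HDFS primitive that utilities such as RecoverLeaseFSUtils build on, is shown below; the timeout, sleep interval and path are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Polls recoverLease until the NameNode reports the file closed, or the deadline passes. */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // true = lease released and file closed; false = recovery still in progress.
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt);
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt);
      attempt++;
      Thread.sleep(4000L); // back off before re-checking, roughly as seen in the log
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      recoverLease((DistributedFileSystem) fs,
          new Path("/hbase/WALs/example-server/wal.0000000000"), 60_000L);
    }
  }
}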
2024-12-10T00:27:27,905 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 so closing down 2024-12-10T00:27:27,905 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T00:27:27,907 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733790443738.temp 2024-12-10T00:27:27,909 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp 2024-12-10T00:27:27,909 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T00:27:27,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741933_1113 (size=2944) 2024-12-10T00:27:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741933_1113 (size=2944) 2024-12-10T00:27:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741933_1113 (size=2944) 2024-12-10T00:27:27,916 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-10T00:27:27,917 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 2024-12-10T00:27:27,917 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T00:27:27,917 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, journal: Splitting hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, size=0 (0bytes) at 1733790443895Finishing writing output for hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 so closing down at 1733790447905 (+4010 ms)Creating recovered edits writer path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp at 1733790447909 (+4 ms)3 split writer threads finished at 1733790447909Closed recovered edits writer 
path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733790447916 (+7 ms)Rename recovered edits hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000037-wal.1733790443738.temp to hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 at 1733790447917 (+1 ms)Processed 30 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738, size=0, length=0, corrupted=false, cancelled=false at 1733790447917 2024-12-10T00:27:27,919 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 to hdfs://localhost:34093/hbase/oldWALs/wal.1733790443738 2024-12-10T00:27:27,919 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 2024-12-10T00:27:27,920 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T00:27:27,921 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34093/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561, archiveDir=hdfs://localhost:34093/hbase/oldWALs, maxLogs=32 2024-12-10T00:27:27,955 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790447921, exclude list is [], retry=0 2024-12-10T00:27:27,957 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37237,DS-4e56f297-94df-44d4-8403-f4675f27f5bf,DISK] 2024-12-10T00:27:27,958 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34693,DS-1c8c0855-c991-44b1-833b-e2ac9409ecea,DISK] 2024-12-10T00:27:27,958 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35811,DS-2d9a9d95-4744-47d9-85d0-93f36535e9bc,DISK] 2024-12-10T00:27:27,959 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790447921 2024-12-10T00:27:27,960 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:41935:41935),(127.0.0.1/127.0.0.1:36533:36533)] 2024-12-10T00:27:27,960 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T00:27:27,961 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,962 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName a 2024-12-10T00:27:27,962 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:27,967 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/8fffecbb83084c7c8ea6e60fc2812ae9 2024-12-10T00:27:27,967 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:27,967 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,968 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName b 2024-12-10T00:27:27,968 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:27,972 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/b/bc38c8c30c2d4c30b12dd3177d47a491 2024-12-10T00:27:27,972 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:27,972 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,973 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b95f19da34c0fd50f86225b2fe91763 columnFamilyName c 2024-12-10T00:27:27,973 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T00:27:27,977 DEBUG [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/c/594b09631b3745108d065678f4504924 2024-12-10T00:27:27,977 INFO [StoreOpener-2b95f19da34c0fd50f86225b2fe91763-1 {}] regionserver.HStore(327): Store=2b95f19da34c0fd50f86225b2fe91763/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T00:27:27,978 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,978 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,979 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:27,980 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 2024-12-10T00:27:27,982 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-12-10T00:27:27,985 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 2024-12-10T00:27:27,985 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2b95f19da34c0fd50f86225b2fe91763 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-10T00:27:27,998 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/bab72da070644ac997939df8d0f9839e is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733790443845/Put/seqid=0 2024-12-10T00:27:28,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741935_1115 (size=5958) 2024-12-10T00:27:28,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741935_1115 (size=5958) 2024-12-10T00:27:28,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741935_1115 (size=5958) 2024-12-10T00:27:28,004 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/bab72da070644ac997939df8d0f9839e 2024-12-10T00:27:28,021 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/1224055e7dc841d0b59cf82668e3cdc7 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733790443851/Put/seqid=0 2024-12-10T00:27:28,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741936_1116 (size=5958) 2024-12-10T00:27:28,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741936_1116 (size=5958) 2024-12-10T00:27:28,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741936_1116 (size=5958) 2024-12-10T00:27:28,026 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/1224055e7dc841d0b59cf82668e3cdc7 2024-12-10T00:27:28,043 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/aafc52b99e604dfdace88f5974d6e330 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733790443857/Put/seqid=0 2024-12-10T00:27:28,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741937_1117 (size=5958) 2024-12-10T00:27:28,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741937_1117 
(size=5958) 2024-12-10T00:27:28,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741937_1117 (size=5958) 2024-12-10T00:27:28,050 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/aafc52b99e604dfdace88f5974d6e330 2024-12-10T00:27:28,054 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/a/bab72da070644ac997939df8d0f9839e as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/bab72da070644ac997939df8d0f9839e 2024-12-10T00:27:28,058 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/a/bab72da070644ac997939df8d0f9839e, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T00:27:28,059 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/b/1224055e7dc841d0b59cf82668e3cdc7 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/b/1224055e7dc841d0b59cf82668e3cdc7 2024-12-10T00:27:28,063 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/b/1224055e7dc841d0b59cf82668e3cdc7, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T00:27:28,064 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/.tmp/c/aafc52b99e604dfdace88f5974d6e330 as hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/c/aafc52b99e604dfdace88f5974d6e330 2024-12-10T00:27:28,068 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/c/aafc52b99e604dfdace88f5974d6e330, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T00:27:28,068 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 2b95f19da34c0fd50f86225b2fe91763 in 83ms, sequenceid=66, compaction requested=false; wal=null 2024-12-10T00:27:28,069 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/0000000000000000066 2024-12-10T00:27:28,070 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:28,070 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:28,070 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T00:27:28,071 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2b95f19da34c0fd50f86225b2fe91763 2024-12-10T00:27:28,073 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/hbase/data/default/testReplayEditsWrittenViaHRegion/2b95f19da34c0fd50f86225b2fe91763/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-10T00:27:28,074 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2b95f19da34c0fd50f86225b2fe91763; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59174970, jitterRate=-0.11822423338890076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T00:27:28,074 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2b95f19da34c0fd50f86225b2fe91763: Writing region info on filesystem at 1733790447960Initializing all the Stores at 1733790447961 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790447961Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790447961Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733790447961Obtaining lock to block concurrent updates at 1733790447985 (+24 ms)Preparing flush snapshotting stores in 2b95f19da34c0fd50f86225b2fe91763 at 1733790447985Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733790447985Flushing stores of testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 
at 1733790447985Flushing 2b95f19da34c0fd50f86225b2fe91763/a: creating writer at 1733790447985Flushing 2b95f19da34c0fd50f86225b2fe91763/a: appending metadata at 1733790447998 (+13 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/a: closing flushed file at 1733790447998Flushing 2b95f19da34c0fd50f86225b2fe91763/b: creating writer at 1733790448008 (+10 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/b: appending metadata at 1733790448020 (+12 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/b: closing flushed file at 1733790448020Flushing 2b95f19da34c0fd50f86225b2fe91763/c: creating writer at 1733790448030 (+10 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/c: appending metadata at 1733790448042 (+12 ms)Flushing 2b95f19da34c0fd50f86225b2fe91763/c: closing flushed file at 1733790448042Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@80c9a59: reopening flushed file at 1733790448053 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7371224e: reopening flushed file at 1733790448058 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a5bf922: reopening flushed file at 1733790448063 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 2b95f19da34c0fd50f86225b2fe91763 in 83ms, sequenceid=66, compaction requested=false; wal=null at 1733790448068 (+5 ms)Cleaning up temporary data from old regions at 1733790448070 (+2 ms)Region opened successfully at 1733790448074 (+4 ms) 2024-12-10T00:27:28,085 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2b95f19da34c0fd50f86225b2fe91763, disabling compactions & flushes 2024-12-10T00:27:28,085 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:28,086 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:28,086 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. after waiting 0 ms 2024-12-10T00:27:28,086 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 2024-12-10T00:27:28,087 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733790443562.2b95f19da34c0fd50f86225b2fe91763. 
2024-12-10T00:27:28,087 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2b95f19da34c0fd50f86225b2fe91763: Waiting for close lock at 1733790448085Disabling compacts and flushes for region at 1733790448085Disabling writes for close at 1733790448086 (+1 ms)Writing region close event to WAL at 1733790448087 (+1 ms)Closed at 1733790448087 2024-12-10T00:27:28,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741934_1114 (size=95) 2024-12-10T00:27:28,090 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790447921 not finished, retry = 0 2024-12-10T00:27:28,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741934_1114 (size=95) 2024-12-10T00:27:28,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741934_1114 (size=95) 2024-12-10T00:27:28,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T00:27:28,192 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733790447921) 2024-12-10T00:27:28,210 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=446 (was 441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1532437667_22 at /127.0.0.1:37706 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:34093 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1532437667_22 at /127.0.0.1:38352 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1532437667_22 at /127.0.0.1:58938 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:34093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1390 (was 1324) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=235 (was 255), ProcessCount=11 (was 11), AvailableMemoryMB=7050 (was 7093) 2024-12-10T00:27:28,211 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1390 is superior to 1024 2024-12-10T00:27:28,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T00:27:28,211 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T00:27:28,211 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T00:27:28,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,212 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-10T00:27:28,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T00:27:28,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=840935073, stopped=false 2024-12-10T00:27:28,213 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a67c4886b4f7,41433,1733790408551 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:27:28,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:27:28,219 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T00:27:28,220 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:27:28,220 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T00:27:28,221 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T00:27:28,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,221 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:27:28,221 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a67c4886b4f7,39473,1733790409727' ***** 2024-12-10T00:27:28,221 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T00:27:28,221 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a67c4886b4f7,42867,1733790410027' ***** 2024-12-10T00:27:28,221 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T00:27:28,221 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42867-0x1000d1e23400003, 
quorum=127.0.0.1:51780, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T00:27:28,221 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T00:27:28,221 INFO [RS:0;a67c4886b4f7:39473 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T00:27:28,222 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(3091): Received CLOSE for 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(959): stopping server a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T00:27:28,222 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0273fa7e527747c1d22b1fb928589bc9, disabling compactions & flushes 2024-12-10T00:27:28,222 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a67c4886b4f7:39473. 2024-12-10T00:27:28,222 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:28,222 DEBUG [RS:0;a67c4886b4f7:39473 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T00:27:28,222 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. after waiting 0 ms 2024-12-10T00:27:28,222 DEBUG [RS:0;a67c4886b4f7:39473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,222 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T00:27:28,222 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(959): stopping server a67c4886b4f7,42867,1733790410027 2024-12-10T00:27:28,222 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1325): Online Regions={0273fa7e527747c1d22b1fb928589bc9=testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9.} 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T00:27:28,222 DEBUG [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1351): Waiting on 0273fa7e527747c1d22b1fb928589bc9 2024-12-10T00:27:28,222 INFO [RS:2;a67c4886b4f7:42867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a67c4886b4f7:42867. 
2024-12-10T00:27:28,222 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T00:27:28,222 DEBUG [RS:2;a67c4886b4f7:42867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T00:27:28,222 DEBUG [RS:2;a67c4886b4f7:42867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,223 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T00:27:28,223 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T00:27:28,223 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-10T00:27:28,223 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T00:27:28,224 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T00:27:28,224 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-10T00:27:28,224 DEBUG [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-10T00:27:28,224 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T00:27:28,224 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T00:27:28,224 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T00:27:28,224 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T00:27:28,224 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T00:27:28,224 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-12-10T00:27:28,227 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0273fa7e527747c1d22b1fb928589bc9/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-10T00:27:28,227 INFO [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 2024-12-10T00:27:28,227 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0273fa7e527747c1d22b1fb928589bc9: Waiting for close lock at 1733790448222Running coprocessor pre-close hooks at 1733790448222Disabling compacts and flushes for region at 1733790448222Disabling writes for close at 1733790448222Writing region close event to WAL at 1733790448223 (+1 ms)Running coprocessor post-close hooks at 1733790448227 (+4 ms)Closed at 1733790448227 2024-12-10T00:27:28,227 DEBUG [RS_CLOSE_REGION-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9. 
2024-12-10T00:27:28,242 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/info/3d91ee3f30c84ab0b526dad364aba517 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733790424549.0273fa7e527747c1d22b1fb928589bc9./info:regioninfo/1733790427715/Put/seqid=0 2024-12-10T00:27:28,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741938_1118 (size=8243) 2024-12-10T00:27:28,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741938_1118 (size=8243) 2024-12-10T00:27:28,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741938_1118 (size=8243) 2024-12-10T00:27:28,250 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/info/3d91ee3f30c84ab0b526dad364aba517 2024-12-10T00:27:28,263 INFO [regionserver/a67c4886b4f7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:28,276 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/ns/0f021cb04a7443bcab9d6cf0076ee726 is 43, key is default/ns:d/1733790412266/Put/seqid=0 2024-12-10T00:27:28,279 INFO [regionserver/a67c4886b4f7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:28,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741939_1119 (size=5153) 2024-12-10T00:27:28,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741939_1119 (size=5153) 2024-12-10T00:27:28,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741939_1119 (size=5153) 2024-12-10T00:27:28,283 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/ns/0f021cb04a7443bcab9d6cf0076ee726 2024-12-10T00:27:28,301 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/table/e29945a0af4749ec974e7a4492b0eb24 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733790424966/Put/seqid=0 2024-12-10T00:27:28,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741940_1120 (size=5431) 2024-12-10T00:27:28,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37237 is added to blk_1073741940_1120 (size=5431) 2024-12-10T00:27:28,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741940_1120 (size=5431) 2024-12-10T00:27:28,308 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/table/e29945a0af4749ec974e7a4492b0eb24 2024-12-10T00:27:28,314 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/info/3d91ee3f30c84ab0b526dad364aba517 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/info/3d91ee3f30c84ab0b526dad364aba517 2024-12-10T00:27:28,320 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/info/3d91ee3f30c84ab0b526dad364aba517, entries=18, sequenceid=21, filesize=8.0 K 2024-12-10T00:27:28,321 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/ns/0f021cb04a7443bcab9d6cf0076ee726 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/ns/0f021cb04a7443bcab9d6cf0076ee726 2024-12-10T00:27:28,327 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/ns/0f021cb04a7443bcab9d6cf0076ee726, entries=2, sequenceid=21, filesize=5.0 K 2024-12-10T00:27:28,328 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/.tmp/table/e29945a0af4749ec974e7a4492b0eb24 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/table/e29945a0af4749ec974e7a4492b0eb24 2024-12-10T00:27:28,334 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/table/e29945a0af4749ec974e7a4492b0eb24, entries=2, sequenceid=21, filesize=5.3 K 2024-12-10T00:27:28,335 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 111ms, sequenceid=21, compaction requested=false 2024-12-10T00:27:28,340 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-10T00:27:28,340 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T00:27:28,340 INFO [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T00:27:28,341 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733790448224Running coprocessor pre-close hooks at 1733790448224Disabling compacts and flushes for region at 1733790448224Disabling writes for close at 1733790448224Obtaining lock to block concurrent updates at 1733790448224Preparing flush snapshotting stores in 1588230740 at 1733790448224Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1733790448225 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733790448225Flushing 1588230740/info: creating writer at 1733790448226 (+1 ms)Flushing 1588230740/info: appending metadata at 1733790448242 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733790448242Flushing 1588230740/ns: creating writer at 1733790448257 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733790448275 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733790448276 (+1 ms)Flushing 1588230740/table: creating writer at 1733790448288 (+12 ms)Flushing 1588230740/table: appending metadata at 1733790448301 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733790448301Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@263e999f: reopening flushed file at 1733790448313 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6be013ff: reopening flushed file at 1733790448320 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f156711: reopening flushed file at 1733790448327 (+7 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 111ms, sequenceid=21, compaction requested=false at 1733790448335 (+8 ms)Writing region close event to WAL at 1733790448336 (+1 ms)Running coprocessor post-close hooks at 1733790448340 (+4 ms)Closed at 1733790448340 2024-12-10T00:27:28,341 DEBUG [RS_CLOSE_META-regionserver/a67c4886b4f7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T00:27:28,422 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(976): stopping server a67c4886b4f7,39473,1733790409727; all regions closed. 2024-12-10T00:27:28,424 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(976): stopping server a67c4886b4f7,42867,1733790410027; all regions closed. 
2024-12-10T00:27:28,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741835_1011 (size=2178) 2024-12-10T00:27:28,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741835_1011 (size=2178) 2024-12-10T00:27:28,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741836_1012 (size=4714) 2024-12-10T00:27:28,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741836_1012 (size=4714) 2024-12-10T00:27:28,427 DEBUG [RS:0;a67c4886b4f7:39473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs 2024-12-10T00:27:28,427 INFO [RS:0;a67c4886b4f7:39473 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a67c4886b4f7%2C39473%2C1733790409727:(num 1733790411689) 2024-12-10T00:27:28,427 DEBUG [RS:0;a67c4886b4f7:39473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,427 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:28,427 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.ChoreService(370): Chore service for: regionserver/a67c4886b4f7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T00:27:28,428 INFO [regionserver/a67c4886b4f7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-10T00:27:28,428 DEBUG [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs 2024-12-10T00:27:28,428 INFO [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a67c4886b4f7%2C42867%2C1733790410027.meta:.meta(num 1733790412079) 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T00:27:28,428 INFO [RS:0;a67c4886b4f7:39473 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39473 2024-12-10T00:27:28,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741834_1010 (size=95) 2024-12-10T00:27:28,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741834_1010 (size=95) 2024-12-10T00:27:28,433 DEBUG [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/oldWALs 2024-12-10T00:27:28,433 INFO [RS:2;a67c4886b4f7:42867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a67c4886b4f7%2C42867%2C1733790410027:(num 1733790411691) 2024-12-10T00:27:28,433 DEBUG [RS:2;a67c4886b4f7:42867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T00:27:28,433 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T00:27:28,433 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T00:27:28,433 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.ChoreService(370): Chore service for: regionserver/a67c4886b4f7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T00:27:28,433 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T00:27:28,433 INFO [regionserver/a67c4886b4f7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T00:27:28,434 INFO [RS:2;a67c4886b4f7:42867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42867 2024-12-10T00:27:28,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a67c4886b4f7,39473,1733790409727 2024-12-10T00:27:28,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T00:27:28,440 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T00:27:28,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a67c4886b4f7,42867,1733790410027 2024-12-10T00:27:28,451 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T00:27:28,461 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a67c4886b4f7,39473,1733790409727] 2024-12-10T00:27:28,482 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a67c4886b4f7,39473,1733790409727 already deleted, retry=false 2024-12-10T00:27:28,483 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a67c4886b4f7,39473,1733790409727 expired; onlineServers=1 2024-12-10T00:27:28,483 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a67c4886b4f7,42867,1733790410027] 2024-12-10T00:27:28,493 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a67c4886b4f7,42867,1733790410027 already deleted, retry=false 2024-12-10T00:27:28,493 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a67c4886b4f7,42867,1733790410027 expired; onlineServers=0 2024-12-10T00:27:28,493 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a67c4886b4f7,41433,1733790408551' ***** 2024-12-10T00:27:28,493 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T00:27:28,494 INFO [M:0;a67c4886b4f7:41433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T00:27:28,494 INFO [M:0;a67c4886b4f7:41433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T00:27:28,494 DEBUG [M:0;a67c4886b4f7:41433 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T00:27:28,494 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T00:27:28,494 DEBUG [M:0;a67c4886b4f7:41433 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T00:27:28,495 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.small.0-1733790411284 {}] cleaner.HFileCleaner(306): Exit Thread[master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.small.0-1733790411284,5,FailOnTimeoutGroup] 2024-12-10T00:27:28,495 DEBUG [master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.large.0-1733790411283 {}] cleaner.HFileCleaner(306): Exit Thread[master/a67c4886b4f7:0:becomeActiveMaster-HFileCleaner.large.0-1733790411283,5,FailOnTimeoutGroup] 2024-12-10T00:27:28,495 INFO [M:0;a67c4886b4f7:41433 {}] hbase.ChoreService(370): Chore service for: master/a67c4886b4f7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T00:27:28,495 INFO [M:0;a67c4886b4f7:41433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T00:27:28,496 DEBUG [M:0;a67c4886b4f7:41433 {}] master.HMaster(1795): Stopping service threads 2024-12-10T00:27:28,496 INFO [M:0;a67c4886b4f7:41433 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T00:27:28,496 INFO [M:0;a67c4886b4f7:41433 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T00:27:28,497 INFO [M:0;a67c4886b4f7:41433 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T00:27:28,498 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T00:27:28,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T00:27:28,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T00:27:28,504 DEBUG [M:0;a67c4886b4f7:41433 {}] zookeeper.ZKUtil(347): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T00:27:28,504 WARN [M:0;a67c4886b4f7:41433 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T00:27:28,505 INFO [M:0;a67c4886b4f7:41433 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/.lastflushedseqids 2024-12-10T00:27:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741941_1121 (size=138) 2024-12-10T00:27:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741941_1121 (size=138) 2024-12-10T00:27:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741941_1121 (size=138) 2024-12-10T00:27:28,520 INFO [M:0;a67c4886b4f7:41433 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T00:27:28,521 INFO [M:0;a67c4886b4f7:41433 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T00:27:28,521 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T00:27:28,521 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:27:28,521 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:27:28,521 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T00:27:28,521 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T00:27:28,521 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.34 KB heapSize=83.73 KB 2024-12-10T00:27:28,535 DEBUG [M:0;a67c4886b4f7:41433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95a30de672fb492a83c6d40b3b5a70c8 is 82, key is hbase:meta,,1/info:regioninfo/1733790412179/Put/seqid=0 2024-12-10T00:27:28,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741942_1122 (size=5672) 2024-12-10T00:27:28,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741942_1122 (size=5672) 2024-12-10T00:27:28,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741942_1122 (size=5672) 2024-12-10T00:27:28,541 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95a30de672fb492a83c6d40b3b5a70c8 2024-12-10T00:27:28,560 DEBUG [M:0;a67c4886b4f7:41433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7cf4e9eff0b489a849372bcfdf5ebbb is 1076, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733790424971/Put/seqid=0 2024-12-10T00:27:28,561 INFO [RS:0;a67c4886b4f7:39473 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T00:27:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T00:27:28,561 INFO [RS:0;a67c4886b4f7:39473 {}] regionserver.HRegionServer(1031): Exiting; stopping=a67c4886b4f7,39473,1733790409727; zookeeper connection closed. 
2024-12-10T00:27:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39473-0x1000d1e23400001, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T00:27:28,562 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4827ed6e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4827ed6e
2024-12-10T00:27:28,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741943_1123 (size=7755)
2024-12-10T00:27:28,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741943_1123 (size=7755)
2024-12-10T00:27:28,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741943_1123 (size=7755)
2024-12-10T00:27:28,568 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.61 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7cf4e9eff0b489a849372bcfdf5ebbb
2024-12-10T00:27:28,572 INFO [RS:2;a67c4886b4f7:42867 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T00:27:28,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T00:27:28,572 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a7cf4e9eff0b489a849372bcfdf5ebbb
2024-12-10T00:27:28,572 INFO [RS:2;a67c4886b4f7:42867 {}] regionserver.HRegionServer(1031): Exiting; stopping=a67c4886b4f7,42867,1733790410027; zookeeper connection closed.
2024-12-10T00:27:28,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x1000d1e23400003, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T00:27:28,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44ed623c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44ed623c
2024-12-10T00:27:28,573 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-10T00:27:28,585 DEBUG [M:0;a67c4886b4f7:41433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8750eb9dbf86409f8b27371ab89c77bc is 69, key is a67c4886b4f7,39473,1733790409727/rs:state/1733790411359/Put/seqid=0
2024-12-10T00:27:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741944_1124 (size=5440)
2024-12-10T00:27:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741944_1124 (size=5440)
2024-12-10T00:27:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741944_1124 (size=5440)
2024-12-10T00:27:28,592 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8750eb9dbf86409f8b27371ab89c77bc
2024-12-10T00:27:28,596 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8750eb9dbf86409f8b27371ab89c77bc
2024-12-10T00:27:28,597 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95a30de672fb492a83c6d40b3b5a70c8 as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/95a30de672fb492a83c6d40b3b5a70c8
2024-12-10T00:27:28,602 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/95a30de672fb492a83c6d40b3b5a70c8, entries=8, sequenceid=168, filesize=5.5 K
2024-12-10T00:27:28,603 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7cf4e9eff0b489a849372bcfdf5ebbb as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a7cf4e9eff0b489a849372bcfdf5ebbb
2024-12-10T00:27:28,608 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a7cf4e9eff0b489a849372bcfdf5ebbb
2024-12-10T00:27:28,608 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a7cf4e9eff0b489a849372bcfdf5ebbb, entries=17, sequenceid=168, filesize=7.6 K
2024-12-10T00:27:28,609 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8750eb9dbf86409f8b27371ab89c77bc as hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8750eb9dbf86409f8b27371ab89c77bc
2024-12-10T00:27:28,614 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8750eb9dbf86409f8b27371ab89c77bc
2024-12-10T00:27:28,614 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34093/user/jenkins/test-data/99a42370-ce4d-fd01-a174-de989642a507/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8750eb9dbf86409f8b27371ab89c77bc, entries=3, sequenceid=168, filesize=5.3 K
2024-12-10T00:27:28,615 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.34 KB/69984, heapSize ~83.43 KB/85432, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 94ms, sequenceid=168, compaction requested=false
2024-12-10T00:27:28,616 INFO [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T00:27:28,616 DEBUG [M:0;a67c4886b4f7:41433 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733790448521Disabling compacts and flushes for region at 1733790448521Disabling writes for close at 1733790448521Obtaining lock to block concurrent updates at 1733790448521Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733790448521Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69984, getHeapSize=85672, getOffHeapSize=0, getCellsCount=195 at 1733790448522 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733790448522Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733790448522Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733790448535 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733790448535Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733790448546 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733790448560 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733790448560Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733790448572 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733790448585 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733790448585Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56dea773: reopening flushed file at 1733790448596 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ce1310a: reopening flushed file at 1733790448602 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@456251a4: reopening flushed file at 1733790448608 (+6 ms)Finished flush of dataSize ~68.34 KB/69984, heapSize ~83.43 KB/85432, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 94ms, sequenceid=168, compaction requested=false at 1733790448615 (+7 ms)Writing region close event to WAL at 1733790448616 (+1 ms)Closed at 1733790448616
2024-12-10T00:27:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741830_1006 (size=56595)
2024-12-10T00:27:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37237 is added to blk_1073741830_1006 (size=56595)
2024-12-10T00:27:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34693 is added to blk_1073741830_1006 (size=56595)
2024-12-10T00:27:28,621 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T00:27:28,621 INFO [M:0;a67c4886b4f7:41433 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-10T00:27:28,621 INFO [M:0;a67c4886b4f7:41433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41433
2024-12-10T00:27:28,621 INFO [M:0;a67c4886b4f7:41433 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T00:27:28,751 INFO [M:0;a67c4886b4f7:41433 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T00:27:28,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T00:27:28,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41433-0x1000d1e23400000, quorum=127.0.0.1:51780, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T00:27:28,761 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428463 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428463 (inode 16655) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733790427964/wal.1733790428463 (inode 16655) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T00:27:28,762 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733790420088/wal.1733790420268 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:28,762 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733790443561/wal.1733790443738 with renewLeaseKey: DEFAULT_16767 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:28,765 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790437900 with renewLeaseKey: DEFAULT_16678 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790437900 (inode 16678) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733790428612/wal.1733790437900 (inode 16678) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T00:27:28,765 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733790413404/wal.1733790413496 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T00:27:28,767 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438633 with renewLeaseKey: DEFAULT_16704 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438633 (inode 16704) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733790438098/wal.1733790438633 (inode 16704) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T00:27:28,768 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733790438781/wal.1733790438838 with renewLeaseKey: DEFAULT_16726 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T00:27:28,769 WARN [PacketResponder: BP-313768122-172.17.0.2-1733790403195:blk_1073741851_1027, type=LAST_IN_PIPELINE {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Replica does not exist BP-313768122-172.17.0.2-1733790403195:1073741851
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.getReplicaInfo(FsDatasetImpl.java:897) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.getStorageUuidForLock(FsDatasetImpl.java:905) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.finalizeBlock(FsDatasetImpl.java:1975) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.finalizeBlock(BlockReceiver.java:1563) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1514) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T00:27:28,770 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733790413092/wal.1733790413273 with renewLeaseKey: DEFAULT_16485
java.io.IOException: Connection to /127.0.0.1:34693 closed
    at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
    at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:581) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    at --------Future.get--------(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput$AckHandler.lambda$channelInactive$2(FanOutOneBlockAsyncDFSOutput.java:323) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.failWaitingAckQueue(FanOutOneBlockAsyncDFSOutput.java:271) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.failed(FanOutOneBlockAsyncDFSOutput.java:266) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput$AckHandler.channelInactive(FanOutOneBlockAsyncDFSOutput.java:322) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:303) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:281) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelInactive(AbstractChannelHandlerContext.java:274) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelInputClosed(ByteToMessageDecoder.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelInactive(ByteToMessageDecoder.java:377) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:303) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:281) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelInactive(AbstractChannelHandlerContext.java:274) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter.channelInactive(ChannelInboundHandlerAdapter.java:81) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelInactive(IdleStateHandler.java:280) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:303) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:281) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelInactive(AbstractChannelHandlerContext.java:274) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelInactive(DefaultChannelPipeline.java:1352) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:301) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:281) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelInactive(DefaultChannelPipeline.java:850) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe$7.run(AbstractChannel.java:811) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.AbstractEventExecutor.runTask(AbstractEventExecutor.java:173) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:166) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:566) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T00:27:28,772 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal.1733790412847 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal.1733790412847 (inode 16462) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733790412580/wal.1733790412847 (inode 16462) Holder DFSClient_NONMAPREDUCE_1400862134_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 25 more
2024-12-10T00:27:28,776 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43206bef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T00:27:28,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@228ffa29{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T00:27:28,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T00:27:28,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f079a76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T00:27:28,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7da22a2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,STOPPED}
2024-12-10T00:27:28,781 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T00:27:28,781 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T00:27:28,781 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-313768122-172.17.0.2-1733790403195 (Datanode Uuid de0f791f-5bfa-4730-a30a-2d4362809fdb) service to localhost/127.0.0.1:34093
2024-12-10T00:27:28,781 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T00:27:28,782 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data5/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,782 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data6/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,783 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T00:27:28,784 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4546bb60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T00:27:28,785 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c7d32f8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T00:27:28,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T00:27:28,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ebbf344{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T00:27:28,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b5fc47c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,STOPPED}
2024-12-10T00:27:28,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T00:27:28,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T00:27:28,786 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T00:27:28,786 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-313768122-172.17.0.2-1733790403195 (Datanode Uuid 4313b812-b7fa-42ae-8dc6-44033a19389c) service to localhost/127.0.0.1:34093
2024-12-10T00:27:28,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data3/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data4/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,787 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T00:27:28,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36632d60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T00:27:28,789 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@751d2fa4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T00:27:28,789 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T00:27:28,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@433df981{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T00:27:28,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f76f489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,STOPPED}
2024-12-10T00:27:28,790 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T00:27:28,791 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T00:27:28,791 WARN [BP-313768122-172.17.0.2-1733790403195 heartbeating to localhost/127.0.0.1:34093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-313768122-172.17.0.2-1733790403195 (Datanode Uuid fff3098e-8200-4489-9cf1-27afa7f9c4a0) service to localhost/127.0.0.1:34093
2024-12-10T00:27:28,791 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T00:27:28,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data1/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/cluster_e7617543-7e8a-8641-fe8e-64579a0fcf35/data/data2/current/BP-313768122-172.17.0.2-1733790403195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T00:27:28,791 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T00:27:28,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T00:27:28,797 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T00:27:28,797 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T00:27:28,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T00:27:28,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f6954c5-aaf4-c51a-d14d-2eae4868097a/hadoop.log.dir/,STOPPED}
2024-12-10T00:27:28,805 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-10T00:27:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down