2024-11-24 04:49:29,682 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-11-24 04:49:29,695 main DEBUG Took 0.011063 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 04:49:29,696 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 04:49:29,696 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 04:49:29,698 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 04:49:29,699 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,706 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 04:49:29,722 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,723 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,724 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,724 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,725 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,725 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,726 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,727 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,727 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,727 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,728 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,728 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,729 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,729 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 04:49:29,730 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,730 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,730 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,731 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,731 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,731 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,732 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,732 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,733 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,733 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 04:49:29,733 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,734 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 04:49:29,735 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 04:49:29,736 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 04:49:29,738 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 04:49:29,738 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
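The LoggerConfig$Builder calls above correspond one-to-one to logger declarations in the log4j2.properties bundled with the hbase-logging test jar (the Console appender they reference is built a few entries further down). As an illustrative reconstruction only, not the actual file contents, the logger section would look roughly like this in Log4j 2 properties syntax:

# Illustrative sketch reconstructed from the status output above; not the real file.
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR

logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN

logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG

logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false

rootLogger.level = INFO
rootLogger.appenderRef.console.ref = Console
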
2024-11-24 04:49:29,739 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 04:49:29,740 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 04:49:29,748 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 04:49:29,751 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 04:49:29,752 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 04:49:29,753 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 04:49:29,753 main DEBUG createAppenders(={Console}) 2024-11-24 04:49:29,754 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized 2024-11-24 04:49:29,754 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-11-24 04:49:29,754 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK. 2024-11-24 04:49:29,755 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 04:49:29,755 main DEBUG OutputStream closed 2024-11-24 04:49:29,756 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 04:49:29,756 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 04:49:29,756 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK 2024-11-24 04:49:29,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 04:49:29,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 04:49:29,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 04:49:29,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 04:49:29,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 04:49:29,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 04:49:29,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 04:49:29,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 04:49:29,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 04:49:29,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 04:49:29,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 04:49:29,868 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 04:49:29,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 04:49:29,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 04:49:29,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 04:49:29,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 04:49:29,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 04:49:29,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 04:49:29,872 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 04:49:29,873 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null 2024-11-24 04:49:29,873 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 04:49:29,873 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK. 2024-11-24T04:49:30,156 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa 2024-11-24 04:49:30,159 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 04:49:30,160 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
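The registrations above expose each configured logger and the Console appender as JMX MBeans under the org.apache.logging.log4j2 domain for the lifetime of the test JVM. A small, hypothetical snippet (not part of the test code; only the JMX domain and object-name shape are taken from the log) that would list them from inside the same process:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ListLog4jMBeans {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Matches names like org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
    for (ObjectName name : server.queryNames(new ObjectName("org.apache.logging.log4j2:*"), null)) {
      System.out.println(name);
    }
  }
}
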
2024-11-24T04:49:30,168 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-11-24T04:49:30,175 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-11-24T04:49:30,200 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T04:49:30,257 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T04:49:30,257 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T04:49:30,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T04:49:30,291 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2, deleteOnExit=true 2024-11-24T04:49:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T04:49:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/test.cache.data in system properties and HBase conf 2024-11-24T04:49:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T04:49:30,295 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir in system properties and HBase conf 2024-11-24T04:49:30,295 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T04:49:30,296 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T04:49:30,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T04:49:30,426 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T04:49:30,578 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T04:49:30,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T04:49:30,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T04:49:30,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T04:49:30,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T04:49:30,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T04:49:30,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T04:49:30,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T04:49:30,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T04:49:30,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T04:49:30,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/nfs.dump.dir in system properties and HBase conf 2024-11-24T04:49:30,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/java.io.tmpdir in system properties and HBase conf 2024-11-24T04:49:30,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T04:49:30,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T04:49:30,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T04:49:31,913 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T04:49:31,997 INFO [Time-limited test {}] log.Log(170): Logging initialized @3173ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T04:49:32,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T04:49:32,165 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T04:49:32,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T04:49:32,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T04:49:32,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T04:49:32,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T04:49:32,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e216de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T04:49:32,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4763bc82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T04:49:32,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5602fba9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/java.io.tmpdir/jetty-localhost-40973-hadoop-hdfs-3_4_1-tests_jar-_-any-6895710040582572972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T04:49:32,483 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@783558f7{HTTP/1.1, (http/1.1)}{localhost:40973} 2024-11-24T04:49:32,483 INFO [Time-limited test {}] server.Server(415): Started @3660ms 2024-11-24T04:49:33,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T04:49:33,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T04:49:33,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T04:49:33,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T04:49:33,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T04:49:33,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1647e812{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T04:49:33,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e19ad24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T04:49:33,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ddf0278{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/java.io.tmpdir/jetty-localhost-33347-hadoop-hdfs-3_4_1-tests_jar-_-any-3465853954593591106/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T04:49:33,297 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@62465dbd{HTTP/1.1, (http/1.1)}{localhost:33347} 2024-11-24T04:49:33,298 INFO [Time-limited test {}] server.Server(415): Started @4474ms 2024-11-24T04:49:33,348 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T04:49:33,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T04:49:33,501 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T04:49:33,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T04:49:33,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T04:49:33,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T04:49:33,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b863aae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T04:49:33,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40ee7008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T04:49:33,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42b50423{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/java.io.tmpdir/jetty-localhost-33563-hadoop-hdfs-3_4_1-tests_jar-_-any-15251132685389143772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T04:49:33,645 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cb1f0d1{HTTP/1.1, (http/1.1)}{localhost:33563} 2024-11-24T04:49:33,645 INFO [Time-limited test {}] server.Server(415): Started @4822ms 2024-11-24T04:49:33,647 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T04:49:33,695 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T04:49:33,701 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T04:49:33,705 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T04:49:33,705 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T04:49:33,705 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T04:49:33,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28786e11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T04:49:33,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6334c715{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T04:49:33,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ac22c80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/java.io.tmpdir/jetty-localhost-35789-hadoop-hdfs-3_4_1-tests_jar-_-any-2756023064444051512/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T04:49:33,806 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@487c0a0f{HTTP/1.1, (http/1.1)}{localhost:35789} 2024-11-24T04:49:33,806 INFO [Time-limited test {}] server.Server(415): Started @4983ms 2024-11-24T04:49:33,808 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
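The block above is the mini cluster bootstrap for the TestAsyncWALReplay tests: HBaseClassTestRule records the 13-minute class timeout, HBaseTestingUtil is asked to start one master, three region servers, three datanodes and one ZooKeeper server (the StartMiniClusterOption logged earlier), and Jetty-backed HDFS web UIs come up for the namenode and datanodes. A minimal sketch of how a test class typically drives this setup; method names follow the usual HBase test utilities from memory and may differ on this branch, and this is not the actual TestAsyncWALReplay source:

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class ExampleMiniClusterTest {
  // Enforces the per-class timeout reported in the log ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(ExampleMiniClusterTest.class);

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}
    UTIL.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build());
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}
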
2024-11-24T04:49:35,013 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data4/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,013 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data3/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,013 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data2/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,013 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data1/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,046 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T04:49:35,046 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T04:49:35,049 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data6/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,049 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data5/current/BP-1373737803-172.17.0.2-1732423771153/current, will proceed with Du for space computation calculation, 2024-11-24T04:49:35,073 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T04:49:35,099 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7200a34c13a8b21a with lease ID 0xb56098e124d7d8e4: Processing first storage report for DS-f39319b7-5869-4767-a451-c3ef9ef6aac6 from datanode DatanodeRegistration(127.0.0.1:44605, datanodeUuid=6240c108-d1b0-413f-8591-50ccf623f7da, infoPort=36007, infoSecurePort=0, ipcPort=37409, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7200a34c13a8b21a with lease ID 0xb56098e124d7d8e4: from storage DS-f39319b7-5869-4767-a451-c3ef9ef6aac6 node DatanodeRegistration(127.0.0.1:44605, datanodeUuid=6240c108-d1b0-413f-8591-50ccf623f7da, infoPort=36007, infoSecurePort=0, ipcPort=37409, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78fa4b34e57d5343 with lease ID 0xb56098e124d7d8e5: Processing first storage report for DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28 from datanode DatanodeRegistration(127.0.0.1:36429, datanodeUuid=4a6ddd70-8fc5-4259-a718-7371409e11f6, infoPort=42871, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78fa4b34e57d5343 with lease ID 0xb56098e124d7d8e5: from storage DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28 node DatanodeRegistration(127.0.0.1:36429, datanodeUuid=4a6ddd70-8fc5-4259-a718-7371409e11f6, infoPort=42871, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2818813aaf9fc with lease ID 0xb56098e124d7d8e3: Processing first storage report for DS-92f8f17e-0a78-4e24-b7af-da7223272a99 from datanode DatanodeRegistration(127.0.0.1:44795, datanodeUuid=0a688774-b62b-4628-a4b3-91e6af29ec07, infoPort=37591, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2818813aaf9fc with lease ID 0xb56098e124d7d8e3: from storage DS-92f8f17e-0a78-4e24-b7af-da7223272a99 node DatanodeRegistration(127.0.0.1:44795, datanodeUuid=0a688774-b62b-4628-a4b3-91e6af29ec07, infoPort=37591, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7200a34c13a8b21a with lease ID 0xb56098e124d7d8e4: Processing first storage report for DS-0cdb9ae1-ab07-4eee-a07d-8f9e28916ad0 from datanode DatanodeRegistration(127.0.0.1:44605, datanodeUuid=6240c108-d1b0-413f-8591-50ccf623f7da, infoPort=36007, infoSecurePort=0, ipcPort=37409, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7200a34c13a8b21a with lease ID 0xb56098e124d7d8e4: from storage DS-0cdb9ae1-ab07-4eee-a07d-8f9e28916ad0 node DatanodeRegistration(127.0.0.1:44605, datanodeUuid=6240c108-d1b0-413f-8591-50ccf623f7da, infoPort=36007, infoSecurePort=0, ipcPort=37409, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78fa4b34e57d5343 with lease ID 0xb56098e124d7d8e5: Processing first storage report for DS-9cab2e21-c77c-4b76-80f3-8e3ee7720e84 from datanode DatanodeRegistration(127.0.0.1:36429, datanodeUuid=4a6ddd70-8fc5-4259-a718-7371409e11f6, infoPort=42871, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78fa4b34e57d5343 with lease ID 0xb56098e124d7d8e5: from storage DS-9cab2e21-c77c-4b76-80f3-8e3ee7720e84 node DatanodeRegistration(127.0.0.1:36429, datanodeUuid=4a6ddd70-8fc5-4259-a718-7371409e11f6, infoPort=42871, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2818813aaf9fc with lease ID 0xb56098e124d7d8e3: Processing first storage report for DS-b6ede24e-ee07-4c61-99b8-bd5ca360aec5 from datanode DatanodeRegistration(127.0.0.1:44795, datanodeUuid=0a688774-b62b-4628-a4b3-91e6af29ec07, infoPort=37591, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153) 2024-11-24T04:49:35,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2818813aaf9fc with lease ID 0xb56098e124d7d8e3: from storage DS-b6ede24e-ee07-4c61-99b8-bd5ca360aec5 node DatanodeRegistration(127.0.0.1:44795, datanodeUuid=0a688774-b62b-4628-a4b3-91e6af29ec07, infoPort=37591, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=554217175;c=1732423771153), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T04:49:35,142 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa 2024-11-24T04:49:35,232 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/zookeeper_0, clientPort=55024, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T04:49:35,246 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55024 2024-11-24T04:49:35,261 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:35,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:35,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741825_1001 (size=7) 2024-11-24T04:49:35,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741825_1001 (size=7) 2024-11-24T04:49:35,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741825_1001 (size=7) 2024-11-24T04:49:35,898 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f with version=8 2024-11-24T04:49:35,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/hbase-staging 2024-11-24T04:49:36,184 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4464c5b832df:0 server-side Connection retries=45 2024-11-24T04:49:36,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:36,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:36,205 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T04:49:36,205 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:36,205 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T04:49:36,373 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T04:49:36,439 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T04:49:36,449 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T04:49:36,453 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T04:49:36,487 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): 
-Dio.netty.processId: 53934 (auto-detected) 2024-11-24T04:49:36,488 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T04:49:36,510 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34701 2024-11-24T04:49:36,531 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34701 connecting to ZooKeeper ensemble=127.0.0.1:55024 2024-11-24T04:49:36,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347010x0, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T04:49:36,691 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34701-0x1016b2cef8a0000 connected 2024-11-24T04:49:37,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:49:37,360 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f, hbase.cluster.distributed=false 2024-11-24T04:49:37,390 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T04:49:37,397 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-24T04:49:37,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34701 2024-11-24T04:49:37,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34701 2024-11-24T04:49:37,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-24T04:49:37,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-24T04:49:37,496 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4464c5b832df:0 server-side Connection retries=45 2024-11-24T04:49:37,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,499 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T04:49:37,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T04:49:37,502 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T04:49:37,506 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T04:49:37,507 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39197 2024-11-24T04:49:37,509 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39197 connecting to ZooKeeper ensemble=127.0.0.1:55024 2024-11-24T04:49:37,510 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391970x0, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T04:49:37,535 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39197-0x1016b2cef8a0001 connected 2024-11-24T04:49:37,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:49:37,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T04:49:37,550 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T04:49:37,553 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T04:49:37,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T04:49:37,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39197 2024-11-24T04:49:37,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39197 2024-11-24T04:49:37,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39197 2024-11-24T04:49:37,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39197 2024-11-24T04:49:37,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39197 2024-11-24T04:49:37,593 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4464c5b832df:0 server-side Connection retries=45 2024-11-24T04:49:37,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,595 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T04:49:37,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T04:49:37,596 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T04:49:37,596 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T04:49:37,597 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46039 2024-11-24T04:49:37,600 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46039 connecting to ZooKeeper ensemble=127.0.0.1:55024 2024-11-24T04:49:37,602 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460390x0, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T04:49:37,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46039-0x1016b2cef8a0002 connected 2024-11-24T04:49:37,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:49:37,630 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T04:49:37,632 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T04:49:37,634 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T04:49:37,636 
DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T04:49:37,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46039 2024-11-24T04:49:37,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46039 2024-11-24T04:49:37,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46039 2024-11-24T04:49:37,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46039 2024-11-24T04:49:37,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46039 2024-11-24T04:49:37,669 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4464c5b832df:0 server-side Connection retries=45 2024-11-24T04:49:37,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,670 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T04:49:37,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T04:49:37,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T04:49:37,671 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T04:49:37,671 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T04:49:37,672 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39021 2024-11-24T04:49:37,675 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39021 connecting to ZooKeeper ensemble=127.0.0.1:55024 2024-11-24T04:49:37,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390210x0, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper 
Event, type=None, state=SyncConnected, path=null 2024-11-24T04:49:37,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390210x0, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:49:37,705 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T04:49:37,709 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39021-0x1016b2cef8a0003 connected 2024-11-24T04:49:37,715 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T04:49:37,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T04:49:37,720 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T04:49:37,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39021 2024-11-24T04:49:37,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39021 2024-11-24T04:49:37,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39021 2024-11-24T04:49:37,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39021 2024-11-24T04:49:37,737 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39021 2024-11-24T04:49:37,755 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4464c5b832df:34701 2024-11-24T04:49:37,756 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4464c5b832df,34701,1732423776004 2024-11-24T04:49:37,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,790 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/4464c5b832df,34701,1732423776004 2024-11-24T04:49:37,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T04:49:37,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:37,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T04:49:37,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T04:49:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:37,827 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T04:49:37,829 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4464c5b832df,34701,1732423776004 from backup master directory 2024-11-24T04:49:37,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4464c5b832df,34701,1732423776004 2024-11-24T04:49:37,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,840 WARN [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start 
scripts (Longer MTTR!) 2024-11-24T04:49:37,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T04:49:37,840 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4464c5b832df,34701,1732423776004 2024-11-24T04:49:37,842 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T04:49:37,844 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T04:49:37,906 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/hbase.id] with ID: 8c0792d3-101b-420e-9c9e-da22fc7d020f 2024-11-24T04:49:37,906 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/.tmp/hbase.id 2024-11-24T04:49:37,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741826_1002 (size=42) 2024-11-24T04:49:37,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741826_1002 (size=42) 2024-11-24T04:49:37,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741826_1002 (size=42) 2024-11-24T04:49:37,923 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/.tmp/hbase.id]:[hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/hbase.id] 2024-11-24T04:49:37,971 INFO [master/4464c5b832df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T04:49:37,976 INFO [master/4464c5b832df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T04:49:37,994 INFO [master/4464c5b832df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
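The two FSUtils entries above record the usual publish-by-rename pattern for small control files on HDFS: hbase.id is first written under .tmp and only then moved to its final name, so a reader never observes a half-written cluster ID file. Below is a minimal, generic sketch of that pattern against the Hadoop FileSystem API; the class name and paths are illustrative and this is not the actual FSUtils code.

// Illustrative sketch: write a small file to a temporary path, then rename it into place.
// Not the real org.apache.hadoop.hbase.util.FSUtils implementation.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublisher {
  public static void publish(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id"); // temporary location
    Path dst = new Path(rootDir, "hbase.id");      // final location readers look for
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // A rename within one HDFS namespace is atomic: hbase.id appears fully written or not at all.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to rename " + tmp + " to " + dst);
    }
  }
}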
2024-11-24T04:49:38,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741827_1003 (size=196) 2024-11-24T04:49:38,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741827_1003 (size=196) 2024-11-24T04:49:38,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741827_1003 (size=196) 2024-11-24T04:49:38,044 INFO [master/4464c5b832df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T04:49:38,046 INFO [master/4464c5b832df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T04:49:38,052 INFO [master/4464c5b832df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:38,077 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed 
to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:38,077 WARN [IPC Server handler 3 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:38,077 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:38,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741828_1004 (size=1189) 2024-11-24T04:49:38,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741828_1004 (size=1189) 2024-11-24T04:49:38,103 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store 2024-11-24T04:49:38,116 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough 
replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:38,116 WARN [IPC Server handler 2 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:38,117 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:38,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741829_1005 (size=34) 2024-11-24T04:49:38,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741829_1005 (size=34) 2024-11-24T04:49:38,127 INFO [master/4464c5b832df:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T04:49:38,130 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:38,131 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T04:49:38,131 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:49:38,132 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:49:38,133 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T04:49:38,133 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:49:38,133 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
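The 'master:store' descriptor printed above (families info, proc, rs and state, each with VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE attributes) corresponds to what the HBase client's builder API expresses. The sketch below shows roughly how the logged 'info' family settings would be written with those public builders; it is for illustration only and is not the code MasterRegion runs internally.

// Rough sketch: the logged 'info' column-family settings expressed with the
// public HBase client builders (illustrative, not MasterRegion's internal code).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .build();
  }
}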
2024-11-24T04:49:38,134 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732423778131Disabling compacts and flushes for region at 1732423778131Disabling writes for close at 1732423778133 (+2 ms)Writing region close event to WAL at 1732423778133Closed at 1732423778133 2024-11-24T04:49:38,137 WARN [master/4464c5b832df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/.initializing 2024-11-24T04:49:38,137 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/WALs/4464c5b832df,34701,1732423776004 2024-11-24T04:49:38,144 INFO [master/4464c5b832df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:38,158 INFO [master/4464c5b832df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4464c5b832df%2C34701%2C1732423776004, suffix=, logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/WALs/4464c5b832df,34701,1732423776004, archiveDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/oldWALs, maxLogs=10 2024-11-24T04:49:38,186 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/WALs/4464c5b832df,34701,1732423776004/4464c5b832df%2C34701%2C1732423776004.1732423778161, exclude list is [], retry=0 2024-11-24T04:49:38,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:38,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:38,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:38,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:38,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-24T04:49:38,255 INFO [master/4464c5b832df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/WALs/4464c5b832df,34701,1732423776004/4464c5b832df%2C34701%2C1732423776004.1732423778161 2024-11-24T04:49:38,256 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:38,256 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:38,257 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:38,260 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,261 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,322 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T04:49:38,325 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:38,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T04:49:38,333 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:38,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T04:49:38,337 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:38,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T04:49:38,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:38,344 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,348 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,349 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,357 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,358 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,362 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T04:49:38,366 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T04:49:38,371 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:38,372 INFO [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73030789, jitterRate=0.08824355900287628}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T04:49:38,380 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732423778272Initializing all the Stores at 1732423778274 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423778275 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423778275Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423778276 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423778276Cleaning up temporary data from old regions at 1732423778358 (+82 ms)Region opened successfully at 1732423778380 (+22 ms) 2024-11-24T04:49:38,381 INFO [master/4464c5b832df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T04:49:38,415 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e6f7923, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4464c5b832df/172.17.0.2:0 2024-11-24T04:49:38,443 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T04:49:38,452 INFO [master/4464c5b832df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T04:49:38,453 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T04:49:38,455 INFO [master/4464c5b832df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T04:49:38,456 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T04:49:38,461 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-24T04:49:38,461 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T04:49:38,484 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T04:49:38,492 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T04:49:38,502 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T04:49:38,505 INFO [master/4464c5b832df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T04:49:38,508 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T04:49:38,521 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T04:49:38,523 INFO [master/4464c5b832df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T04:49:38,527 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T04:49:38,533 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T04:49:38,535 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T04:49:38,544 DEBUG [master/4464c5b832df:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T04:49:38,561 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T04:49:38,573 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,589 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4464c5b832df,34701,1732423776004, sessionid=0x1016b2cef8a0000, setting cluster-up flag (Was=false) 2024-11-24T04:49:38,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
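The ZKUtil(113) lines earlier in the log ("Set watcher on znode that does not yet exist") together with the NodeCreated events above for /hbase/running and /hbase/master follow standard ZooKeeper watch semantics: an exists() call registers a one-shot watch even when the znode is absent, and the watcher is notified once the node is created. A minimal sketch with the plain ZooKeeper client follows; the connection string, latch and class name are illustrative, and this is not HBase's ZKWatcher/ZKUtil code.

// Minimal ZooKeeper sketch: watch a znode that does not exist yet.
// exists() returns null but still registers the watch; the watcher fires
// with NodeCreated when the node appears. Illustrative values throughout.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);
    zk.exists("/hbase/running", true); // null result, but the watch is registered
    created.await();                   // released once /hbase/running is created
    zk.close();
  }
}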
2024-11-24T04:49:38,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,649 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T04:49:38,651 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4464c5b832df,34701,1732423776004 2024-11-24T04:49:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:38,702 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T04:49:38,704 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4464c5b832df,34701,1732423776004 2024-11-24T04:49:38,713 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T04:49:38,741 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(746): ClusterId : 8c0792d3-101b-420e-9c9e-da22fc7d020f 2024-11-24T04:49:38,741 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(746): ClusterId : 8c0792d3-101b-420e-9c9e-da22fc7d020f 2024-11-24T04:49:38,741 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(746): ClusterId : 8c0792d3-101b-420e-9c9e-da22fc7d020f 2024-11-24T04:49:38,744 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T04:49:38,744 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T04:49:38,744 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T04:49:38,767 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T04:49:38,767 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc 
initialized 2024-11-24T04:49:38,768 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T04:49:38,768 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T04:49:38,768 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T04:49:38,768 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T04:49:38,787 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T04:49:38,787 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T04:49:38,787 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T04:49:38,788 DEBUG [RS:0;4464c5b832df:39197 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41735c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4464c5b832df/172.17.0.2:0 2024-11-24T04:49:38,788 DEBUG [RS:2;4464c5b832df:39021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23c2564, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4464c5b832df/172.17.0.2:0 2024-11-24T04:49:38,788 DEBUG [RS:1;4464c5b832df:46039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@122b3cbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4464c5b832df/172.17.0.2:0 2024-11-24T04:49:38,807 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;4464c5b832df:39021 2024-11-24T04:49:38,807 INFO [AsyncFSWAL-0-hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData-prefix:4464c5b832df,34701,1732423776004 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-11-24T04:49:38,811 INFO [RS:2;4464c5b832df:39021 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T04:49:38,812 INFO [RS:2;4464c5b832df:39021 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T04:49:38,812 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T04:49:38,816 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4464c5b832df:39197 2024-11-24T04:49:38,816 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4464c5b832df:46039 2024-11-24T04:49:38,816 INFO [RS:0;4464c5b832df:39197 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T04:49:38,817 INFO [RS:1;4464c5b832df:46039 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T04:49:38,817 INFO [RS:0;4464c5b832df:39197 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T04:49:38,817 INFO [RS:1;4464c5b832df:46039 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T04:49:38,817 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T04:49:38,817 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T04:49:38,819 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=46039, startcode=1732423777593 2024-11-24T04:49:38,819 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=39197, startcode=1732423777462 2024-11-24T04:49:38,820 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=39021, startcode=1732423777669 2024-11-24T04:49:38,827 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T04:49:38,833 DEBUG [RS:0;4464c5b832df:39197 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T04:49:38,833 DEBUG [RS:2;4464c5b832df:39021 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T04:49:38,834 DEBUG [RS:1;4464c5b832df:46039 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T04:49:38,838 INFO [master/4464c5b832df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T04:49:38,847 INFO [master/4464c5b832df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-24T04:49:38,855 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4464c5b832df,34701,1732423776004 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T04:49:38,875 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4464c5b832df:0, corePoolSize=5, maxPoolSize=5 2024-11-24T04:49:38,875 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4464c5b832df:0, corePoolSize=5, maxPoolSize=5 2024-11-24T04:49:38,877 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4464c5b832df:0, corePoolSize=5, maxPoolSize=5 2024-11-24T04:49:38,877 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4464c5b832df:0, corePoolSize=5, maxPoolSize=5 2024-11-24T04:49:38,878 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4464c5b832df:0, corePoolSize=10, maxPoolSize=10 2024-11-24T04:49:38,878 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:38,878 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4464c5b832df:0, corePoolSize=2, maxPoolSize=2 2024-11-24T04:49:38,878 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:38,894 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37651, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T04:49:38,894 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33957, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T04:49:38,895 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34525, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T04:49:38,901 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T04:49:38,901 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T04:49:38,902 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T04:49:38,907 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T04:49:38,908 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T04:49:38,910 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,910 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T04:49:38,917 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732423808917 2024-11-24T04:49:38,919 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T04:49:38,920 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T04:49:38,925 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T04:49:38,926 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T04:49:38,926 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): 
Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T04:49:38,926 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T04:49:38,926 WARN [IPC Server handler 4 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:38,927 WARN [IPC Server handler 4 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:38,927 WARN [IPC Server handler 4 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:38,927 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:38,934 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-24T04:49:38,934 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-24T04:49:38,934 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-24T04:49:38,934 WARN [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-24T04:49:38,934 WARN [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-24T04:49:38,934 WARN [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
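Editor's note: the FSTableDescriptors line above spells out the hbase:meta schema that InitMetaProcedure writes (families info/ns/rep_barrier/table, ROWCOL bloom filters, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks). A standalone sketch of building one column family with the same settings through the public descriptor builders; "demo" is a hypothetical table name, since hbase:meta itself is created by the procedure, not by client code like this.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Settings copied from the 'info' family in the descriptor above:
    // ROWCOL bloom, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .setMaxVersions(3)
        .build();

    // Hypothetical table wrapping that family, just to show the builder chain.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}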
2024-11-24T04:49:38,937 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T04:49:38,938 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T04:49:38,939 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T04:49:38,945 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T04:49:38,945 INFO [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T04:49:38,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741831_1007 (size=1321) 2024-11-24T04:49:38,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741831_1007 (size=1321) 2024-11-24T04:49:38,949 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.large.0-1732423778948,5,FailOnTimeoutGroup] 2024-11-24T04:49:38,950 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.small.0-1732423778949,5,FailOnTimeoutGroup] 2024-11-24T04:49:38,950 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:38,950 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T04:49:38,951 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:38,951 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
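Editor's note: the cleaner lines above start the log and HFile cleaner chores, and HMaster reports that reopening regions with very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0 (that key name is quoted in the log itself). A sketch of the corresponding configuration; the threshold value is an arbitrary example, and the two TTL key names are my assumption rather than something the log prints.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerAndRecoveryTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key quoted in the HMaster log line; 256 is only an example threshold.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    // TTLs consumed by the TimeToLiveLogCleaner / TimeToLiveHFileCleaner chores;
    // key names assumed, values are illustrative.
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);   // 10 minutes
    conf.setLong("hbase.master.hfilecleaner.ttl", 300_000L); // 5 minutes

    System.out.println("ref-count threshold = "
        + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}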
2024-11-24T04:49:38,952 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T04:49:38,953 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f 2024-11-24T04:49:38,958 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:38,958 WARN [IPC Server handler 1 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:38,958 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:38,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741832_1008 (size=32) 2024-11-24T04:49:38,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741832_1008 (size=32) 2024-11-24T04:49:38,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:38,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T04:49:38,973 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T04:49:38,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:38,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T04:49:38,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T04:49:38,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:38,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T04:49:38,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T04:49:38,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:38,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T04:49:38,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T04:49:38,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:38,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:38,990 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T04:49:38,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740 2024-11-24T04:49:38,992 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740 2024-11-24T04:49:38,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T04:49:38,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T04:49:38,995 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T04:49:38,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T04:49:39,001 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:39,002 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67406172, jitterRate=0.004430234432220459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T04:49:39,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732423778969Initializing all the Stores at 1732423778970 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423778970Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423778971 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423778971Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423778971Cleaning up temporary data from old regions at 1732423778994 (+23 ms)Region opened successfully at 1732423779005 (+11 ms) 2024-11-24T04:49:39,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T04:49:39,006 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T04:49:39,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-24T04:49:39,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T04:49:39,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T04:49:39,008 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T04:49:39,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732423779006Disabling compacts and flushes for region at 1732423779006Disabling writes for close at 1732423779006Writing region close event to WAL at 1732423779007 (+1 ms)Closed at 1732423779007 2024-11-24T04:49:39,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T04:49:39,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T04:49:39,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T04:49:39,025 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,025 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T04:49:39,025 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, 
still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,026 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,027 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,027 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,027 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,027 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,028 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,028 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,028 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,028 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T04:49:39,028 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,036 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=39197, startcode=1732423777462 2024-11-24T04:49:39,036 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=39021, startcode=1732423777669 2024-11-24T04:49:39,036 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(2659): reportForDuty to master=4464c5b832df,34701,1732423776004 with port=46039, startcode=1732423777593 2024-11-24T04:49:39,037 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,039 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(517): Registering regionserver=4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,047 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,047 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(517): Registering 
regionserver=4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,047 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f 2024-11-24T04:49:39,048 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38973 2024-11-24T04:49:39,048 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T04:49:39,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,051 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f 2024-11-24T04:49:39,051 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38973 2024-11-24T04:49:39,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(517): Registering regionserver=4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,051 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T04:49:39,054 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f 2024-11-24T04:49:39,054 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38973 2024-11-24T04:49:39,054 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T04:49:39,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T04:49:39,134 DEBUG [RS:2;4464c5b832df:39021 {}] zookeeper.ZKUtil(111): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,135 WARN [RS:2;4464c5b832df:39021 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T04:49:39,135 DEBUG [RS:1;4464c5b832df:46039 {}] zookeeper.ZKUtil(111): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,135 INFO [RS:2;4464c5b832df:39021 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:39,135 DEBUG [RS:0;4464c5b832df:39197 {}] zookeeper.ZKUtil(111): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,135 WARN [RS:1;4464c5b832df:46039 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
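Editor's note: the WALFactory lines just below show every region server instantiating the AsyncFSWALProvider. A one-liner sketch of how that provider choice is usually expressed in configuration; the key name "hbase.wal.provider" and the alternative value "filesystem" are my assumptions, the log only shows which provider class was instantiated.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "asyncfs" selects AsyncFSWALProvider (seen in the log);
    // "filesystem" would select the classic FSHLog-based provider. Key name assumed.
    conf.set("hbase.wal.provider", "asyncfs");
    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}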
2024-11-24T04:49:39,136 WARN [RS:0;4464c5b832df:39197 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T04:49:39,136 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,136 INFO [RS:1;4464c5b832df:46039 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:39,136 INFO [RS:0;4464c5b832df:39197 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:39,136 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,136 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,138 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4464c5b832df,39021,1732423777669] 2024-11-24T04:49:39,139 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4464c5b832df,46039,1732423777593] 2024-11-24T04:49:39,139 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4464c5b832df,39197,1732423777462] 2024-11-24T04:49:39,165 INFO [RS:0;4464c5b832df:39197 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T04:49:39,165 INFO [RS:2;4464c5b832df:39021 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T04:49:39,165 INFO [RS:1;4464c5b832df:46039 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T04:49:39,179 WARN [4464c5b832df:34701 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T04:49:39,182 INFO [RS:2;4464c5b832df:39021 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T04:49:39,183 INFO [RS:0;4464c5b832df:39197 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T04:49:39,183 INFO [RS:1;4464c5b832df:46039 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T04:49:39,188 INFO [RS:2;4464c5b832df:39021 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T04:49:39,188 INFO [RS:0;4464c5b832df:39197 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T04:49:39,188 INFO [RS:1;4464c5b832df:46039 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T04:49:39,189 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,189 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,189 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,190 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T04:49:39,190 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T04:49:39,190 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T04:49:39,196 INFO [RS:0;4464c5b832df:39197 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T04:49:39,196 INFO [RS:2;4464c5b832df:39021 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T04:49:39,196 INFO [RS:1;4464c5b832df:46039 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T04:49:39,197 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,197 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,197 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
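Editor's note: the MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M (a 0.95 ratio), and the compaction throughput controller is bounded at 100 MB/s upper and 50 MB/s lower. A sketch of the configuration those numbers are typically derived from; the key names and the 0.4 heap fraction are my assumptions from the stock defaults, only the ratios and bounds come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of heap, and the low-water mark as a
    // fraction of that limit (836 M / 880 M = 0.95 in the log). Key names assumed.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds from the log; key names assumed.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

    System.out.println("global memstore fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}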
2024-11-24T04:49:39,198 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,198 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0, corePoolSize=2, maxPoolSize=2 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0, corePoolSize=2, maxPoolSize=2 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 
2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0, corePoolSize=2, maxPoolSize=2 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,199 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,200 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,200 DEBUG [RS:0;4464c5b832df:39197 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,200 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,200 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4464c5b832df:0, corePoolSize=1, maxPoolSize=1 2024-11-24T04:49:39,200 DEBUG [RS:2;4464c5b832df:39021 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,200 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,200 DEBUG [RS:1;4464c5b832df:46039 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,201 DEBUG [RS:0;4464c5b832df:39197 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4464c5b832df:0, corePoolSize=3, maxPoolSize=3 2024-11-24T04:49:39,202 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,202 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,202 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,202 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,202 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,203 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,39021,1732423777669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
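Editor's note: every ExecutorService line above prints corePoolSize equal to maxPoolSize, i.e. a plain fixed-size pool per event type (RS_OPEN_REGION with 1 thread, RS_LOG_REPLAY_OPS with 2, RS_FLUSH_OPERATIONS and RS_SNAPSHOT_OPERATIONS with 3). A standalone java.util.concurrent sketch of that shape; it mirrors the pattern and the pool sizes from the log, not HBase's internal ExecutorService class.

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedPoolPerEventTypeSketch {
  public static void main(String[] args) throws InterruptedException {
    // Pool sizes copied from the log lines above.
    Map<String, ExecutorService> pools = Map.of(
        "RS_OPEN_REGION", Executors.newFixedThreadPool(1),
        "RS_LOG_REPLAY_OPS", Executors.newFixedThreadPool(2),
        "RS_FLUSH_OPERATIONS", Executors.newFixedThreadPool(3),
        "RS_SNAPSHOT_OPERATIONS", Executors.newFixedThreadPool(3));

    pools.get("RS_FLUSH_OPERATIONS").submit(
        () -> System.out.println("flush handled on " + Thread.currentThread().getName()));

    for (ExecutorService pool : pools.values()) {
      pool.shutdown();
      pool.awaitTermination(5, TimeUnit.SECONDS);
    }
  }
}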
2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,46039,1732423777593-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T04:49:39,208 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,39197,1732423777462-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T04:49:39,230 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T04:49:39,230 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T04:49:39,230 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T04:49:39,233 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,39197,1732423777462-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,46039,1732423777593-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,39021,1732423777669-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,233 INFO [RS:0;4464c5b832df:39197 {}] regionserver.Replication(171): 4464c5b832df,39197,1732423777462 started 2024-11-24T04:49:39,233 INFO [RS:2;4464c5b832df:39021 {}] regionserver.Replication(171): 4464c5b832df,39021,1732423777669 started 2024-11-24T04:49:39,233 INFO [RS:1;4464c5b832df:46039 {}] regionserver.Replication(171): 4464c5b832df,46039,1732423777593 started 2024-11-24T04:49:39,255 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
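[Editor's note] The ChoreService lines above register periodic tasks at fixed intervals (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60 s, and so on). The sketch below is a rough JDK analogue of that kind of fixed-period scheduling; it deliberately uses a plain ScheduledExecutorService rather than HBase's ChoreService/ScheduledChore classes, with the periods copied from the log.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Periodic "chores" scheduled at intervals reported in the log above.
public final class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

        // CompactionChecker / MemstoreFlusherChore fire every 1000 ms.
        chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check"), 1000, 1000, TimeUnit.MILLISECONDS);

        // ExecutorStatusChore fires every 60 000 ms.
        chores.scheduleAtFixedRate(
                () -> System.out.println("executor status"), 60_000, 60_000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);   // let the fast chore run a few times
        chores.shutdownNow();
    }
}
```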
2024-11-24T04:49:39,255 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1482): Serving as 4464c5b832df,39197,1732423777462, RpcServer on 4464c5b832df/172.17.0.2:39197, sessionid=0x1016b2cef8a0001 2024-11-24T04:49:39,256 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T04:49:39,256 DEBUG [RS:0;4464c5b832df:39197 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,257 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,257 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:39,257 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,39197,1732423777462' 2024-11-24T04:49:39,257 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1482): Serving as 4464c5b832df,39021,1732423777669, RpcServer on 4464c5b832df/172.17.0.2:39021, sessionid=0x1016b2cef8a0003 2024-11-24T04:49:39,257 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1482): Serving as 4464c5b832df,46039,1732423777593, RpcServer on 4464c5b832df/172.17.0.2:46039, sessionid=0x1016b2cef8a0002 2024-11-24T04:49:39,257 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T04:49:39,257 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T04:49:39,257 DEBUG [RS:2;4464c5b832df:39021 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,257 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T04:49:39,257 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,39021,1732423777669' 2024-11-24T04:49:39,257 DEBUG [RS:1;4464c5b832df:46039 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,257 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T04:49:39,257 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,46039,1732423777593' 2024-11-24T04:49:39,257 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T04:49:39,258 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T04:49:39,258 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T04:49:39,258 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T04:49:39,259 DEBUG [RS:2;4464c5b832df:39021 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T04:49:39,259 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T04:49:39,259 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T04:49:39,259 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T04:49:39,259 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T04:49:39,259 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T04:49:39,259 DEBUG [RS:1;4464c5b832df:46039 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,259 DEBUG [RS:2;4464c5b832df:39021 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4464c5b832df,39021,1732423777669 2024-11-24T04:49:39,259 DEBUG [RS:0;4464c5b832df:39197 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4464c5b832df,39197,1732423777462 2024-11-24T04:49:39,260 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,46039,1732423777593' 2024-11-24T04:49:39,260 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,39197,1732423777462' 2024-11-24T04:49:39,260 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4464c5b832df,39021,1732423777669' 2024-11-24T04:49:39,260 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T04:49:39,260 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T04:49:39,260 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T04:49:39,260 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T04:49:39,260 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T04:49:39,261 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T04:49:39,261 DEBUG [RS:0;4464c5b832df:39197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T04:49:39,261 DEBUG [RS:1;4464c5b832df:46039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T04:49:39,261 DEBUG [RS:2;4464c5b832df:39021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T04:49:39,261 INFO [RS:0;4464c5b832df:39197 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T04:49:39,261 INFO [RS:1;4464c5b832df:46039 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T04:49:39,261 INFO [RS:2;4464c5b832df:39021 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 
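[Editor's note] The procedure-member lines show each region server checking the /hbase/flush-table-proc and /hbase/online-snapshot znodes for 'abort' and 'acquired' children. A minimal sketch of inspecting the same znodes with the plain ZooKeeper client follows; the quorum address 127.0.0.1:55024 is taken from the log, and the paths assume the default /hbase base znode (a missing node would surface as KeeperException.NoNodeException).

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

// Lists the coordination znodes the region servers above are watching.
public final class ProcedureZnodeCheck {
    public static void main(String[] args) throws Exception {
        // Quorum address copied from the log; a no-op watcher is enough for a one-shot read.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55024", 30_000, event -> { });
        try {
            for (String path : new String[] {
                    "/hbase/flush-table-proc/acquired",
                    "/hbase/flush-table-proc/abort",
                    "/hbase/online-snapshot/acquired",
                    "/hbase/online-snapshot/abort"}) {
                List<String> children = zk.getChildren(path, false);
                System.out.println(path + " -> " + children);
            }
        } finally {
            zk.close();
        }
    }
}
```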
2024-11-24T04:49:39,261 INFO [RS:0;4464c5b832df:39197 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T04:49:39,261 INFO [RS:1;4464c5b832df:46039 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T04:49:39,261 INFO [RS:2;4464c5b832df:39021 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T04:49:39,366 INFO [RS:2;4464c5b832df:39021 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:39,366 INFO [RS:0;4464c5b832df:39197 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:39,366 INFO [RS:1;4464c5b832df:46039 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:39,369 INFO [RS:2;4464c5b832df:39021 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4464c5b832df%2C39021%2C1732423777669, suffix=, logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669, archiveDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs, maxLogs=32 2024-11-24T04:49:39,369 INFO [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4464c5b832df%2C46039%2C1732423777593, suffix=, logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593, archiveDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs, maxLogs=32 2024-11-24T04:49:39,371 INFO [RS:0;4464c5b832df:39197 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4464c5b832df%2C39197%2C1732423777462, suffix=, logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39197,1732423777462, archiveDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs, maxLogs=32 2024-11-24T04:49:39,390 DEBUG [RS:2;4464c5b832df:39021 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669/4464c5b832df%2C39021%2C1732423777669.1732423779373, exclude list is [], retry=0 2024-11-24T04:49:39,391 DEBUG [RS:0;4464c5b832df:39197 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39197,1732423777462/4464c5b832df%2C39197%2C1732423777462.1732423779373, exclude list is [], retry=0 2024-11-24T04:49:39,391 DEBUG [RS:1;4464c5b832df:46039 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593/4464c5b832df%2C46039%2C1732423777593.1732423779373, exclude list is [], retry=0 2024-11-24T04:49:39,394 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,395 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,395 WARN [IPC Server handler 2 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,395 WARN [IPC Server handler 1 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,395 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,395 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,396 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,396 WARN [IPC Server handler 3 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,397 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:39,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:39,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:39,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:39,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:39,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:39,433 INFO [RS:0;4464c5b832df:39197 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39197,1732423777462/4464c5b832df%2C39197%2C1732423777462.1732423779373 2024-11-24T04:49:39,433 INFO [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593/4464c5b832df%2C46039%2C1732423777593.1732423779373 2024-11-24T04:49:39,434 DEBUG [RS:0;4464c5b832df:39197 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:39,436 DEBUG [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:39,438 INFO [RS:2;4464c5b832df:39021 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669/4464c5b832df%2C39021%2C1732423777669.1732423779373 2024-11-24T04:49:39,438 DEBUG [RS:2;4464c5b832df:39021 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:39,682 DEBUG [4464c5b832df:34701 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-24T04:49:39,689 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(204): Hosts are {4464c5b832df=0} racks are {/default-rack=0} 2024-11-24T04:49:39,696 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-24T04:49:39,696 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-24T04:49:39,696 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-24T04:49:39,696 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-24T04:49:39,696 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-24T04:49:39,697 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-24T04:49:39,697 INFO [4464c5b832df:34701 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-24T04:49:39,697 INFO [4464c5b832df:34701 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-24T04:49:39,697 INFO [4464c5b832df:34701 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-24T04:49:39,697 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-24T04:49:39,704 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4464c5b832df,46039,1732423777593 2024-11-24T04:49:39,709 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4464c5b832df,46039,1732423777593, state=OPENING 2024-11-24T04:49:39,733 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T04:49:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:49:39,745 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:39,745 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:39,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:39,746 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:39,747 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T04:49:39,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4464c5b832df,46039,1732423777593}] 2024-11-24T04:49:39,924 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T04:49:39,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42469, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T04:49:39,940 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T04:49:39,941 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:39,942 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-24T04:49:39,946 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4464c5b832df%2C46039%2C1732423777593.meta, suffix=.meta, logDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593, archiveDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs, maxLogs=32 2024-11-24T04:49:39,965 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593/4464c5b832df%2C46039%2C1732423777593.meta.1732423779948.meta, exclude list is [], retry=0 2024-11-24T04:49:39,967 WARN [IPC Server handler 0 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:39,968 WARN [IPC Server handler 0 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:39,968 WARN [IPC Server handler 0 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 
1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:39,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:39,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:39,975 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593/4464c5b832df%2C46039%2C1732423777593.meta.1732423779948.meta 2024-11-24T04:49:39,976 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:39,976 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:39,978 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T04:49:39,981 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T04:49:39,985 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
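[Editor's note] The repeated "Failed to place enough replicas, still in need of 1 to reach 3" warnings come from HDFS requesting 3 replicas while fewer usable DISK storages are selectable at that moment; the WAL pipelines created right afterwards settle on two datanodes. If that noise matters in a test, one option (an assumption about this harness, not something the log confirms) is to lower the default replication in the Configuration handed to the mini cluster, as in the sketch below.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class ReplicationConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Ask HDFS for 2 replicas instead of the default 3, matching the
        // two-datanode pipelines actually reported in the WAL lines above.
        conf.setInt("dfs.replication", 2);
        System.out.println("dfs.replication = " + conf.getInt("dfs.replication", 3));
        // The Configuration would then be passed to whatever starts the mini cluster.
    }
}
```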
2024-11-24T04:49:39,989 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T04:49:39,990 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:39,990 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T04:49:39,990 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T04:49:39,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T04:49:39,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T04:49:39,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:39,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:39,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T04:49:39,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T04:49:39,999 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:39,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:40,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T04:49:40,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T04:49:40,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:40,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T04:49:40,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T04:49:40,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T04:49:40,004 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:40,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
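[Editor's note] The CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms) correspond to a handful of standard HBase properties. The sketch below sets them explicitly to the same values reported in the log; the property names are quoted from memory and should be checked against the HBase reference guide.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirror the CompactionConfiguration lines in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L); // 7 days in ms

        System.out.println("compaction ratio = "
                + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
    }
}
```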
2024-11-24T04:49:40,005 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T04:49:40,006 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740 2024-11-24T04:49:40,009 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740 2024-11-24T04:49:40,011 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T04:49:40,011 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T04:49:40,012 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T04:49:40,014 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T04:49:40,016 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63017118, jitterRate=-0.06097176671028137}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T04:49:40,016 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T04:49:40,017 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732423779991Writing region info on filesystem at 1732423779991Initializing all the Stores at 1732423779993 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423779994 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423779994Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423779994Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732423779994Cleaning up temporary data from old regions at 1732423780011 (+17 ms)Running coprocessor post-open hooks at 1732423780016 (+5 ms)Region opened successfully at 1732423780017 (+1 ms) 2024-11-24T04:49:40,024 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732423779914 2024-11-24T04:49:40,036 DEBUG [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T04:49:40,037 INFO [RS_OPEN_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T04:49:40,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4464c5b832df,46039,1732423777593 2024-11-24T04:49:40,041 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4464c5b832df,46039,1732423777593, state=OPEN 2024-11-24T04:49:40,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T04:49:40,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T04:49:40,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:40,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:40,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T04:49:40,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T04:49:40,050 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4464c5b832df,46039,1732423777593 2024-11-24T04:49:40,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:40,050 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T04:49:40,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T04:49:40,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4464c5b832df,46039,1732423777593 in 301 msec 2024-11-24T04:49:40,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T04:49:40,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0420 sec 2024-11-24T04:49:40,068 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T04:49:40,069 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T04:49:40,092 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T04:49:40,093 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4464c5b832df,46039,1732423777593, seqNum=-1] 2024-11-24T04:49:40,119 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T04:49:40,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T04:49:40,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3930 sec 2024-11-24T04:49:40,144 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732423780144, completionTime=-1 2024-11-24T04:49:40,147 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-24T04:49:40,147 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
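[Editor's note] InitMetaProcedure finishes after the hbase:meta location has been fetched from the connection registry ([region=hbase:meta,,1.1588230740, hostname=4464c5b832df,46039,...]). A client can ask for the same location through the public API; a minimal sketch, assuming an hbase-site.xml (or equivalent Configuration) that points at this cluster, is:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Equivalent of the "fetched meta region location" line in the log.
            HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println("hbase:meta is on " + loc.getServerName());
        }
    }
}
```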
2024-11-24T04:49:40,198 INFO [master/4464c5b832df:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-24T04:49:40,199 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732423840198 2024-11-24T04:49:40,199 INFO [master/4464c5b832df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732423900199 2024-11-24T04:49:40,199 INFO [master/4464c5b832df:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 51 msec 2024-11-24T04:49:40,201 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-24T04:49:40,214 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,214 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,214 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,216 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4464c5b832df:34701, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,217 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,217 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,225 DEBUG [master/4464c5b832df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T04:49:40,252 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.412sec 2024-11-24T04:49:40,255 INFO [master/4464c5b832df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T04:49:40,257 INFO [master/4464c5b832df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T04:49:40,258 INFO [master/4464c5b832df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T04:49:40,259 INFO [master/4464c5b832df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
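[Editor's note] Once the master reports "has completed initialization" and has joined the three region servers, the same state can be observed from a client through the Admin API. A short sketch follows, again assuming a Configuration that points at this cluster; ClusterMetrics is the HBase 2.x-era API (older clients use ClusterStatus instead).

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClusterStatusSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics();
            // Should report the active master and the three live region servers from the log.
            System.out.println("active master : " + metrics.getMasterName());
            System.out.println("live servers  : " + metrics.getLiveServerMetrics().size());
            System.out.println("regions in transition: "
                    + metrics.getRegionStatesInTransition().size());
        }
    }
}
```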
2024-11-24T04:49:40,259 INFO [master/4464c5b832df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T04:49:40,261 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T04:49:40,261 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T04:49:40,266 DEBUG [master/4464c5b832df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T04:49:40,267 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T04:49:40,268 INFO [master/4464c5b832df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4464c5b832df,34701,1732423776004-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:40,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c470bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T04:49:40,354 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4464c5b832df,34701,-1 for getting cluster id 2024-11-24T04:49:40,357 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T04:49:40,368 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8c0792d3-101b-420e-9c9e-da22fc7d020f' 2024-11-24T04:49:40,371 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T04:49:40,371 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8c0792d3-101b-420e-9c9e-da22fc7d020f" 2024-11-24T04:49:40,371 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bf04316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T04:49:40,371 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4464c5b832df,34701,-1] 2024-11-24T04:49:40,374 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T04:49:40,376 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:49:40,378 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T04:49:40,380 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ebbdb23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T04:49:40,381 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T04:49:40,388 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4464c5b832df,46039,1732423777593, seqNum=-1] 2024-11-24T04:49:40,389 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T04:49:40,391 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T04:49:40,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4464c5b832df,34701,1732423776004 2024-11-24T04:49:40,413 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:38973/hbase 2024-11-24T04:49:40,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=355, OpenFileDescriptor=583, MaxFileDescriptor=1048576, SystemLoadAverage=568, ProcessCount=11, AvailableMemoryMB=11442 2024-11-24T04:49:40,454 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:40,458 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:40,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:40,465 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-63399408, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-63399408, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:40,480 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-63399408/hregion-63399408.1732423780466, exclude list is [], retry=0 2024-11-24T04:49:40,484 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:40,484 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:40,487 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:40,497 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-63399408/hregion-63399408.1732423780466 2024-11-24T04:49:40,497 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:40,498 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 668681cf6e76e7820df2c69086c8b483, NAME => 'testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:40,502 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,502 WARN [IPC Server handler 1 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,502 WARN [IPC Server handler 1 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741838_1014 (size=64) 2024-11-24T04:49:40,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741838_1014 (size=64) 2024-11-24T04:49:40,511 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:40,513 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,515 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 668681cf6e76e7820df2c69086c8b483 columnFamilyName a 2024-11-24T04:49:40,516 DEBUG [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:40,516 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] regionserver.HStore(327): Store=668681cf6e76e7820df2c69086c8b483/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:40,517 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,518 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,519 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,520 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,520 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,523 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,527 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:40,527 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 668681cf6e76e7820df2c69086c8b483; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60402643, jitterRate=-0.09993048012256622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:49:40,528 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 668681cf6e76e7820df2c69086c8b483: Writing region info on filesystem at 1732423780511Initializing all the Stores at 1732423780513 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423780513Cleaning up temporary data from old regions at 1732423780520 (+7 ms)Region opened successfully at 1732423780528 (+8 ms) 2024-11-24T04:49:40,529 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 668681cf6e76e7820df2c69086c8b483, disabling compactions & flushes 2024-11-24T04:49:40,529 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region 
testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. 2024-11-24T04:49:40,529 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. 2024-11-24T04:49:40,529 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. after waiting 0 ms 2024-11-24T04:49:40,529 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. 2024-11-24T04:49:40,529 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. 2024-11-24T04:49:40,529 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 668681cf6e76e7820df2c69086c8b483: Waiting for close lock at 1732423780529Disabling compacts and flushes for region at 1732423780529Disabling writes for close at 1732423780529Writing region close event to WAL at 1732423780529Closed at 1732423780529 2024-11-24T04:49:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741837_1013 (size=95) 2024-11-24T04:49:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741837_1013 (size=95) 2024-11-24T04:49:40,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741837_1013 (size=95) 2024-11-24T04:49:40,543 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:40,543 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-63399408:(num 1732423780466) 2024-11-24T04:49:40,545 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-24T04:49:40,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741839_1015 (size=320) 2024-11-24T04:49:40,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741839_1015 (size=320) 2024-11-24T04:49:40,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741839_1015 (size=320) 2024-11-24T04:49:40,568 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-24T04:49:40,571 WARN [IPC Server handler 0 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,571 WARN [IPC Server handler 0 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,572 WARN [IPC Server handler 0 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741840_1016 (size=253) 2024-11-24T04:49:40,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741840_1016 (size=253) 2024-11-24T04:49:40,615 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1, size=320 (320bytes) 2024-11-24T04:49:40,616 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T04:49:40,616 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T04:49:40,616 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1 2024-11-24T04:49:40,623 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1 after 4ms 2024-11-24T04:49:40,631 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:40,632 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1 took 19ms 2024-11-24T04:49:40,646 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1 so closing down 2024-11-24T04:49:40,646 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:40,650 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-24T04:49:40,652 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp 2024-11-24T04:49:40,653 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:40,656 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,656 WARN [IPC Server handler 2 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,656 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741841_1017 (size=320) 2024-11-24T04:49:40,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741841_1017 (size=320) 2024-11-24T04:49:40,667 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:40,670 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002 2024-11-24T04:49:40,676 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 37 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-24T04:49:40,676 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1, journal: Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1, size=320 (320bytes) at 1732423780615Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1 so closing down at 1732423780646 (+31 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp at 1732423780652 (+6 ms)3 split writer threads finished at 1732423780653 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1732423780667 (+14 ms)Rename 
recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002 at 1732423780670 (+3 ms)Processed 2 edits across 1 Regions in 37 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1732423780676 (+6 ms) 2024-11-24T04:49:40,699 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2, size=253 (253bytes) 2024-11-24T04:49:40,700 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2 2024-11-24T04:49:40,701 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2 after 1ms 2024-11-24T04:49:40,709 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:40,710 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2 took 11ms 2024-11-24T04:49:40,719 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2 so closing down 2024-11-24T04:49:40,719 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:40,724 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-24T04:49:40,726 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002-wal-2.temp 2024-11-24T04:49:40,726 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:40,729 WARN [IPC Server handler 4 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,729 WARN [IPC Server handler 4 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,729 WARN [IPC Server handler 4 on default port 38973 {}] 
blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741842_1018 (size=253) 2024-11-24T04:49:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741842_1018 (size=253) 2024-11-24T04:49:40,745 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:40,751 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:40,759 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-11-24T04:49:40,762 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 48 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-24T04:49:40,762 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2, journal: Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2, size=253 (253bytes) at 1732423780699Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2 so closing down at 1732423780719 (+20 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002-wal-2.temp at 1732423780726 (+7 ms)3 split writer threads finished at 1732423780726Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1732423780745 (+19 ms)Processed 1 edits across 1 Regions in 48 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1732423780762 (+17 ms) 2024-11-24T04:49:40,762 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:40,767 INFO [Time-limited test 
{}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:40,785 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal.1732423780767, exclude list is [], retry=0 2024-11-24T04:49:40,787 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,787 WARN [IPC Server handler 2 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,787 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,793 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:40,793 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:40,800 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal.1732423780767 2024-11-24T04:49:40,808 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:40,808 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 668681cf6e76e7820df2c69086c8b483, NAME => 'testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:40,809 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:40,809 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 
668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,809 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,811 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,813 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 668681cf6e76e7820df2c69086c8b483 columnFamilyName a 2024-11-24T04:49:40,813 DEBUG [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:40,814 INFO [StoreOpener-668681cf6e76e7820df2c69086c8b483-1 {}] regionserver.HStore(327): Store=668681cf6e76e7820df2c69086c8b483/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:40,814 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,816 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,820 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,822 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002 2024-11-24T04:49:40,825 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:40,830 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002 2024-11-24T04:49:40,834 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 668681cf6e76e7820df2c69086c8b483 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-24T04:49:40,891 DEBUG 
[Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/.tmp/a/0c47dda44b4343cf9151b5159e58be36 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1732423780543/Put/seqid=0 2024-11-24T04:49:40,896 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:40,896 WARN [IPC Server handler 3 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:40,897 WARN [IPC Server handler 3 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:40,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741844_1020 (size=5170) 2024-11-24T04:49:40,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741844_1020 (size=5170) 2024-11-24T04:49:40,905 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/.tmp/a/0c47dda44b4343cf9151b5159e58be36 2024-11-24T04:49:40,945 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/.tmp/a/0c47dda44b4343cf9151b5159e58be36 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/a/0c47dda44b4343cf9151b5159e58be36 2024-11-24T04:49:40,954 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/a/0c47dda44b4343cf9151b5159e58be36, entries=2, sequenceid=2, filesize=5.0 K 2024-11-24T04:49:40,959 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 668681cf6e76e7820df2c69086c8b483 in 124ms, sequenceid=2, compaction requested=false; wal=null 2024-11-24T04:49:40,961 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/0000000000000000002 2024-11-24T04:49:40,961 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,961 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,964 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 668681cf6e76e7820df2c69086c8b483 2024-11-24T04:49:40,967 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/668681cf6e76e7820df2c69086c8b483/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-24T04:49:40,969 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 668681cf6e76e7820df2c69086c8b483; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69040462, jitterRate=0.028783053159713745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:49:40,970 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 668681cf6e76e7820df2c69086c8b483: Writing region info on filesystem at 1732423780809Initializing all the Stores at 1732423780811 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423780811Obtaining lock to block concurrent updates at 1732423780834 (+23 ms)Preparing flush snapshotting stores in 668681cf6e76e7820df2c69086c8b483 at 1732423780834Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1732423780838 (+4 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1732423780455.668681cf6e76e7820df2c69086c8b483. 
at 1732423780838Flushing 668681cf6e76e7820df2c69086c8b483/a: creating writer at 1732423780840 (+2 ms)Flushing 668681cf6e76e7820df2c69086c8b483/a: appending metadata at 1732423780880 (+40 ms)Flushing 668681cf6e76e7820df2c69086c8b483/a: closing flushed file at 1732423780883 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12ec6c26: reopening flushed file at 1732423780943 (+60 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 668681cf6e76e7820df2c69086c8b483 in 124ms, sequenceid=2, compaction requested=false; wal=null at 1732423780959 (+16 ms)Cleaning up temporary data from old regions at 1732423780961 (+2 ms)Region opened successfully at 1732423780970 (+9 ms) 2024-11-24T04:49:40,996 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=365 (was 355) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53672 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53032 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at 
/127.0.0.1:53748 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:38973/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53012 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=669 (was 583) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=568 (was 568), ProcessCount=11 (was 11), AvailableMemoryMB=11399 (was 11442) 2024-11-24T04:49:41,009 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=365, OpenFileDescriptor=669, MaxFileDescriptor=1048576, SystemLoadAverage=568, ProcessCount=11, AvailableMemoryMB=11397 2024-11-24T04:49:41,031 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:41,034 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:41,035 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:41,038 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-25378303, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-25378303, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:41,051 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-25378303/hregion-25378303.1732423781039, exclude list is [], retry=0 2024-11-24T04:49:41,054 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:41,054 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:41,054 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:41,057 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-25378303/hregion-25378303.1732423781039 2024-11-24T04:49:41,057 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:41,058 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 4f6fc9c9a7a989b0235636125c01ec53, NAME => 'testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:41,062 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T04:49:41,062 WARN [IPC Server handler 2 on default port 38973 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T04:49:41,062 WARN [IPC Server handler 2 on default port 38973 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T04:49:41,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741846_1022 (size=64) 2024-11-24T04:49:41,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741846_1022 (size=64) 2024-11-24T04:49:41,069 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:41,072 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,075 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f6fc9c9a7a989b0235636125c01ec53 columnFamilyName a 2024-11-24T04:49:41,075 DEBUG [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:41,076 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] regionserver.HStore(327): Store=4f6fc9c9a7a989b0235636125c01ec53/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:41,077 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,078 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,078 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,079 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,079 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,081 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,084 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:41,085 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4f6fc9c9a7a989b0235636125c01ec53; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67038788, jitterRate=-0.0010442137718200684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:49:41,085 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4f6fc9c9a7a989b0235636125c01ec53: Writing region info on filesystem at 1732423781069Initializing all the Stores at 1732423781070 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423781070Cleaning up temporary data from old regions at 1732423781079 (+9 ms)Region opened successfully at 1732423781085 (+6 ms) 2024-11-24T04:49:41,085 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4f6fc9c9a7a989b0235636125c01ec53, disabling compactions & flushes 2024-11-24T04:49:41,085 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. 2024-11-24T04:49:41,085 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. 2024-11-24T04:49:41,086 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. after waiting 0 ms 2024-11-24T04:49:41,086 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. 2024-11-24T04:49:41,086 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. 
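(Editor's note) The entries above create, open and close region 4f6fc9c9a7a989b0235636125c01ec53 of table 'testReplayEditsWrittenIntoWAL' with a single column family 'a' (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB block size, no compression or encoding). The test harness builds the region directly via HRegion; purely for reference, a minimal sketch of an equivalent descriptor built with the standard HBase 2.x client API follows. The Connection/Admin setup is an assumption for illustration, not part of the test run logged here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateReplayTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Single family 'a', matching the descriptor printed in the log:
          // VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, no compression/encoding.
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("a"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build());
          admin.createTable(table.build());
        }
      }
    }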
2024-11-24T04:49:41,086 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4f6fc9c9a7a989b0235636125c01ec53: Waiting for close lock at 1732423781085Disabling compacts and flushes for region at 1732423781085Disabling writes for close at 1732423781086 (+1 ms)Writing region close event to WAL at 1732423781086Closed at 1732423781086 2024-11-24T04:49:41,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741845_1021 (size=95) 2024-11-24T04:49:41,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741845_1021 (size=95) 2024-11-24T04:49:41,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741845_1021 (size=95) 2024-11-24T04:49:41,093 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:41,093 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-25378303:(num 1732423781039) 2024-11-24T04:49:41,094 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-24T04:49:41,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741847_1023 (size=320) 2024-11-24T04:49:41,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741847_1023 (size=320) 2024-11-24T04:49:41,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741847_1023 (size=320) 2024-11-24T04:49:41,109 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-24T04:49:41,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741848_1024 (size=253) 2024-11-24T04:49:41,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741848_1024 (size=253) 2024-11-24T04:49:41,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741848_1024 (size=253) 2024-11-24T04:49:41,139 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2, size=253 (253bytes) 2024-11-24T04:49:41,139 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2 2024-11-24T04:49:41,140 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2 after 1ms 2024-11-24T04:49:41,144 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:41,144 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2 took 5ms 
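(Editor's note) The compression context initialized just above shows tag and value compression enabled with GZ as the value codec, on the AsyncFSWALProvider instantiated earlier in this test. As a rough sketch, the configuration below is the kind of setup that produces such WALs. The exact property keys are recalled from the HBASE-25869 era and are assumptions to verify against your HBase version, not values confirmed by this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalCompressionConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Async fan-out WAL provider, as seen in the log (AsyncFSWALProvider).
        conf.set("hbase.wal.provider", "asyncfs");
        // WAL compression settings; property names are assumptions recalled from
        // HBASE-25869 and may differ by version -- verify before relying on them.
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);   // dictionary/tag compression
        conf.setBoolean("hbase.regionserver.wal.value.compression", true);   // value compression
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");     // matches valueCompressionType=GZ above
        return conf;
      }
    }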
2024-11-24T04:49:41,146 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2 so closing down 2024-11-24T04:49:41,147 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:41,149 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-24T04:49:41,150 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp 2024-11-24T04:49:41,151 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:41,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741849_1025 (size=253) 2024-11-24T04:49:41,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741849_1025 (size=253) 2024-11-24T04:49:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741849_1025 (size=253) 2024-11-24T04:49:41,163 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:41,165 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 2024-11-24T04:49:41,166 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 22 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-24T04:49:41,166 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2, journal: Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2, size=253 (253bytes) at 1732423781139Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2 so closing down at 1732423781147 (+8 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp at 1732423781150 (+3 ms)3 split writer threads finished at 1732423781151 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1732423781164 (+13 ms)Rename recovered edits 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 at 1732423781165 (+1 ms)Processed 1 edits across 1 Regions in 22 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1732423781166 (+1 ms) 2024-11-24T04:49:41,180 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1, size=320 (320bytes) 2024-11-24T04:49:41,180 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1 2024-11-24T04:49:41,181 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1 after 1ms 2024-11-24T04:49:41,184 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:41,184 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1 took 4ms 2024-11-24T04:49:41,187 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1 so closing down 2024-11-24T04:49:41,187 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:41,189 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-24T04:49:41,191 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp 2024-11-24T04:49:41,192 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:41,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741850_1026 (size=320) 2024-11-24T04:49:41,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741850_1026 (size=320) 2024-11-24T04:49:41,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741850_1026 (size=320) 2024-11-24T04:49:41,201 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:41,206 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:41,209 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002, length=253 2024-11-24T04:49:41,212 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 2024-11-24T04:49:41,212 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 27 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-24T04:49:41,213 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1, journal: Splitting hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1, size=320 (320bytes) at 1732423781180Finishing writing output for hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1 so closing down at 1732423781187 (+7 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp at 1732423781191 (+4 ms)3 split writer threads finished at 1732423781192 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1732423781201 (+9 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 at 1732423781212 (+11 ms)Processed 2 edits across 1 Regions in 27 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1732423781212 2024-11-24T04:49:41,213 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:41,216 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:41,230 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal.1732423781216, exclude list is [], retry=0 2024-11-24T04:49:41,234 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:41,234 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:41,235 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:41,237 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal.1732423781216 2024-11-24T04:49:41,238 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:41,238 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f6fc9c9a7a989b0235636125c01ec53, NAME => 'testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:41,238 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:41,238 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,238 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,241 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,243 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f6fc9c9a7a989b0235636125c01ec53 columnFamilyName a 2024-11-24T04:49:41,243 DEBUG [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:41,244 INFO [StoreOpener-4f6fc9c9a7a989b0235636125c01ec53-1 {}] regionserver.HStore(327): Store=4f6fc9c9a7a989b0235636125c01ec53/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:41,244 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,245 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,247 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,248 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 2024-11-24T04:49:41,251 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:41,253 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 2024-11-24T04:49:41,253 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4f6fc9c9a7a989b0235636125c01ec53 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-24T04:49:41,269 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/.tmp/a/27d40df0f3504351a76f0aa96f9fb51c is 58, key is testReplayEditsWrittenIntoWAL/a:1/1732423781093/Put/seqid=0 2024-11-24T04:49:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741852_1028 (size=5170) 2024-11-24T04:49:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741852_1028 (size=5170) 2024-11-24T04:49:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741852_1028 (size=5170) 2024-11-24T04:49:41,278 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/.tmp/a/27d40df0f3504351a76f0aa96f9fb51c 2024-11-24T04:49:41,287 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/.tmp/a/27d40df0f3504351a76f0aa96f9fb51c as 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/a/27d40df0f3504351a76f0aa96f9fb51c 2024-11-24T04:49:41,295 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/a/27d40df0f3504351a76f0aa96f9fb51c, entries=2, sequenceid=2, filesize=5.0 K 2024-11-24T04:49:41,296 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 4f6fc9c9a7a989b0235636125c01ec53 in 42ms, sequenceid=2, compaction requested=false; wal=null 2024-11-24T04:49:41,297 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/0000000000000000002 2024-11-24T04:49:41,297 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,297 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,300 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4f6fc9c9a7a989b0235636125c01ec53 2024-11-24T04:49:41,303 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/4f6fc9c9a7a989b0235636125c01ec53/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-24T04:49:41,304 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4f6fc9c9a7a989b0235636125c01ec53; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72737168, jitterRate=0.08386826515197754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:49:41,305 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4f6fc9c9a7a989b0235636125c01ec53: Writing region info on filesystem at 1732423781239Initializing all the Stores at 1732423781240 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423781240Obtaining lock to block concurrent updates at 1732423781253 (+13 ms)Preparing flush snapshotting stores in 4f6fc9c9a7a989b0235636125c01ec53 at 1732423781253Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1732423781253Flushing stores of testReplayEditsWrittenIntoWAL,,1732423781032.4f6fc9c9a7a989b0235636125c01ec53. 
at 1732423781253Flushing 4f6fc9c9a7a989b0235636125c01ec53/a: creating writer at 1732423781253Flushing 4f6fc9c9a7a989b0235636125c01ec53/a: appending metadata at 1732423781268 (+15 ms)Flushing 4f6fc9c9a7a989b0235636125c01ec53/a: closing flushed file at 1732423781268Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52b87b50: reopening flushed file at 1732423781285 (+17 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 4f6fc9c9a7a989b0235636125c01ec53 in 42ms, sequenceid=2, compaction requested=false; wal=null at 1732423781296 (+11 ms)Cleaning up temporary data from old regions at 1732423781297 (+1 ms)Region opened successfully at 1732423781305 (+8 ms) 2024-11-24T04:49:41,320 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=375 (was 365) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53098 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53672 [Waiting for operation #24] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:44096 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:44014 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53824 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53012 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=755 (was 669) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=568 (was 568), ProcessCount=11 (was 11), AvailableMemoryMB=11382 (was 11397) 2024-11-24T04:49:41,330 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=375, OpenFileDescriptor=755, MaxFileDescriptor=1048576, SystemLoadAverage=568, ProcessCount=11, AvailableMemoryMB=11382 2024-11-24T04:49:41,346 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:41,348 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:41,349 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:41,352 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-65404606, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-65404606, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:41,363 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-65404606/hregion-65404606.1732423781352, exclude list is [], retry=0 2024-11-24T04:49:41,367 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:41,367 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:41,367 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:41,369 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-65404606/hregion-65404606.1732423781352 2024-11-24T04:49:41,370 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:41,370 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d952f2f0a15e631bd0af008bfecd3e30, NAME => 'testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:41,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741854_1030 (size=64) 2024-11-24T04:49:41,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741854_1030 (size=64) 2024-11-24T04:49:41,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741854_1030 (size=64) 2024-11-24T04:49:41,382 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:41,383 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,385 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName a 2024-11-24T04:49:41,385 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:41,386 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:41,386 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,388 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName b 2024-11-24T04:49:41,388 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:41,388 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:41,389 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,390 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName c 2024-11-24T04:49:41,390 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:41,391 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:41,391 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,392 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,393 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,394 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,394 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d952f2f0a15e631bd0af008bfecd3e30 
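(Editor's note) Region open above probes the region directory for recovered-edits files left by WAL splitting (hbase/data/&lt;namespace&gt;/&lt;table&gt;/&lt;encoded-region&gt;/recovered.edits/...). A minimal sketch of inspecting that directory directly with the Hadoop FileSystem API follows; the NameNode URI and region path are copied from the log and would differ in a real deployment.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListRecoveredEdits {
      public static void main(String[] args) throws Exception {
        // NameNode and region path as they appear in the log; adjust for your cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38973"), new Configuration());
        Path recoveredEdits = new Path(
            "/hbase/data/default/testReplayEditsWrittenIntoWAL/"
            + "d952f2f0a15e631bd0af008bfecd3e30/recovered.edits");
        if (!fs.exists(recoveredEdits)) {
          System.out.println("No recovered.edits directory (nothing to replay).");
          return;
        }
        // Edit files carry zero-padded WAL sequence ids in their names
        // (e.g. 0000000000000000002 earlier in this log); "<n>.seqid" marker files
        // record the region's max sequence id and contain no edits.
        for (FileStatus status : fs.listStatus(recoveredEdits)) {
          System.out.println(status.getPath().getName() + "  " + status.getLen() + " bytes");
        }
      }
    }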
2024-11-24T04:49:41,395 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:41,396 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:41,399 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:41,400 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d952f2f0a15e631bd0af008bfecd3e30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63390331, jitterRate=-0.05541045963764191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:41,401 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d952f2f0a15e631bd0af008bfecd3e30: Writing region info on filesystem at 1732423781382Initializing all the Stores at 1732423781383 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423781383Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423781383Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423781383Cleaning up temporary data from old regions at 1732423781394 (+11 ms)Region opened successfully at 1732423781400 (+6 ms) 2024-11-24T04:49:41,401 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d952f2f0a15e631bd0af008bfecd3e30, disabling compactions & flushes 2024-11-24T04:49:41,401 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:41,401 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:41,401 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. after waiting 0 ms 2024-11-24T04:49:41,401 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 
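(Editor's note) The FlushLargeStoresPolicy line above explains the flushSizeLowerBound=44739242 reported when this region opens: no hbase.hregion.percolumnfamilyflush.size.lower.bound is set on the table, so the policy divides the region's memstore flush size by the number of column families (here 'a', 'b', 'c'). A quick check of that arithmetic follows; it assumes the stock 128 MB default for hbase.hregion.memstore.flush.size, which the matching value in the log suggests is in effect here.

    public class FlushLowerBoundCheck {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default (134217728)
        int families = 3;                            // column families 'a', 'b', 'c' in the table above
        long lowerBound = memstoreFlushSize / families;
        // Prints 44739242, i.e. ~42.7 MB -- matching flushSizeLowerBound=44739242 in the log.
        System.out.println(lowerBound);
      }
    }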
2024-11-24T04:49:41,402 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:41,402 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d952f2f0a15e631bd0af008bfecd3e30: Waiting for close lock at 1732423781401Disabling compacts and flushes for region at 1732423781401Disabling writes for close at 1732423781401Writing region close event to WAL at 1732423781401Closed at 1732423781401 2024-11-24T04:49:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741853_1029 (size=95) 2024-11-24T04:49:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741853_1029 (size=95) 2024-11-24T04:49:41,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741853_1029 (size=95) 2024-11-24T04:49:41,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:41,412 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-65404606:(num 1732423781352) 2024-11-24T04:49:41,412 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:41,415 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:41,428 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, exclude list is [], retry=0 2024-11-24T04:49:41,431 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:41,431 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:41,431 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:41,433 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 2024-11-24T04:49:41,434 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:41,699 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, size=0 (0bytes) 2024-11-24T04:49:41,699 WARN [Time-limited test {}] wal.WALSplitter(453): File 
hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 might be still open, length is 0 2024-11-24T04:49:41,699 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 2024-11-24T04:49:41,700 WARN [IPC Server handler 0 on default port 38973 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-11-24T04:49:41,701 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 after 2ms 2024-11-24T04:49:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741831_1007 (size=1321) 2024-11-24T04:49:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741832_1008 (size=32) 2024-11-24T04:49:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741828_1004 (size=1189) 2024-11-24T04:49:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741829_1005 (size=34) 2024-11-24T04:49:44,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53856 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53856 dst: /127.0.0.1:44795 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44795 remote=/127.0.0.1:53856]. Total timeout mills is 60000, 57552 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:44,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:53140 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:36429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53140 dst: /127.0.0.1:36429 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:44,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:44134 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44134 dst: /127.0.0.1:44605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:44,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741855_1032 (size=263633) 2024-11-24T04:49:44,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741855_1032 (size=263633) 2024-11-24T04:49:44,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741855_1032 (size=263633) 2024-11-24T04:49:45,508 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T04:49:45,565 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T04:49:45,703 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 after 4003ms 2024-11-24T04:49:45,712 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:45,714 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 took 4015ms 2024-11-24T04:49:45,720 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1732423781415.temp 2024-11-24T04:49:45,726 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp 2024-11-24T04:49:45,860 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415; continuing. 
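The WAL splitter above found wal.1732423781415 still open (length 0), failed lease recovery on attempt=0 after 2 ms, and succeeded on attempt=1 roughly four seconds later. A sketch of the underlying HDFS pattern — repeatedly asking the NameNode to recover the lease until the file is reported closed — using only DistributedFileSystem calls. The helper, the fixed sleep and the path are illustrative assumptions, not HBase's RecoverLeaseFSUtils implementation, which layers backoff, timeouts and logging over the same two calls.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    /** Trigger lease recovery on a possibly still-open file and wait until it is closed. */
    static void recoverLease(DistributedFileSystem dfs, Path wal) throws IOException, InterruptedException {
        while (true) {
            // recoverLease returns true once the file is closed (or was already closed).
            boolean closed = dfs.recoverLease(wal);
            if (closed || dfs.isFileClosed(wal)) {
                return;
            }
            Thread.sleep(1000L); // hypothetical pause between attempts
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL path; the log shows an hdfs://localhost:38973/hbase/WALs/... file.
        Path wal = new Path("hdfs://namenode.example:8020/hbase/WALs/example/wal.0000000000000000001");
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
            recoverLease((DistributedFileSystem) fs, wal);
        }
    }
}
```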
2024-11-24T04:49:45,860 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 so closing down 2024-11-24T04:49:45,860 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:45,860 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:45,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741856_1033 (size=263641) 2024-11-24T04:49:45,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741856_1033 (size=263641) 2024-11-24T04:49:45,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741856_1033 (size=263641) 2024-11-24T04:49:45,865 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp (wrote 3002 edits, skipped 0 edits in 74 ms) 2024-11-24T04:49:45,868 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 2024-11-24T04:49:45,868 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 153 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, size=0, length=0, corrupted=false, cancelled=false 2024-11-24T04:49:45,868 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, journal: Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, size=0 (0bytes) at 1732423781699Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp at 1732423785726 (+4027 ms)Split 1024 edits, skipped 0 edits. at 1732423785795 (+69 ms)Split 2048 edits, skipped 0 edits. 
at 1732423785831 (+36 ms)Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 so closing down at 1732423785860 (+29 ms)3 split writer threads finished at 1732423785860Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp (wrote 3002 edits, skipped 0 edits in 74 ms) at 1732423785865 (+5 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000000001-wal.1732423781415.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 at 1732423785868 (+3 ms)Processed 3002 edits across 1 Regions in 153 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415, size=0, length=0, corrupted=false, cancelled=false at 1732423785868 2024-11-24T04:49:45,870 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423781415 2024-11-24T04:49:45,872 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 2024-11-24T04:49:45,872 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:45,875 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:45,888 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423785875, exclude list is [], retry=0 2024-11-24T04:49:45,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:45,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:45,893 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:45,896 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423785875 2024-11-24T04:49:45,897 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 
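The reader initialization a few entries above reports hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ, i.e. the WAL being split was written with WAL compression plus GZ value compression (this is the value-compression variant of the replay test). A sketch of the configuration that, to the best of my recollection, enables that combination before the WAL factory is created; the key names are assumptions and do not appear in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalValueCompressionSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed property names (recalled from the WAL compression feature);
        // the log only shows the resulting reader state, not the keys.
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);   // dictionary/tag compression
        conf.setBoolean("hbase.regionserver.wal.value.compression", true);   // compress cell values
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");     // matches valueCompressionType=GZ

        System.out.println(conf.get("hbase.regionserver.wal.value.compression.type"));
    }
}
```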
2024-11-24T04:49:45,897 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:45,900 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,902 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName a 2024-11-24T04:49:45,902 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:45,903 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:45,903 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,904 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName b 2024-11-24T04:49:45,904 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:45,905 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/b, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:45,905 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,907 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d952f2f0a15e631bd0af008bfecd3e30 columnFamilyName c 2024-11-24T04:49:45,907 DEBUG [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:45,908 INFO [StoreOpener-d952f2f0a15e631bd0af008bfecd3e30-1 {}] regionserver.HStore(327): Store=d952f2f0a15e631bd0af008bfecd3e30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:45,908 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,910 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,912 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:45,913 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 2024-11-24T04:49:45,917 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:45,974 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-24T04:49:46,319 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d952f2f0a15e631bd0af008bfecd3e30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-24T04:49:46,357 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/1926cbcb41d14b88b43a99587f388c69 
is 62, key is testReplayEditsWrittenIntoWAL/a:100/1732423781441/Put/seqid=0 2024-11-24T04:49:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741858_1035 (size=50463) 2024-11-24T04:49:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741858_1035 (size=50463) 2024-11-24T04:49:46,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741858_1035 (size=50463) 2024-11-24T04:49:46,369 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/1926cbcb41d14b88b43a99587f388c69 2024-11-24T04:49:46,377 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/1926cbcb41d14b88b43a99587f388c69 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/a/1926cbcb41d14b88b43a99587f388c69 2024-11-24T04:49:46,385 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/a/1926cbcb41d14b88b43a99587f388c69, entries=754, sequenceid=754, filesize=49.3 K 2024-11-24T04:49:46,385 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for d952f2f0a15e631bd0af008bfecd3e30 in 66ms, sequenceid=754, compaction requested=false; wal=null 2024-11-24T04:49:46,404 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-24T04:49:46,404 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d952f2f0a15e631bd0af008bfecd3e30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-24T04:49:46,412 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/9851b7fa40de431baaf2d712bbe64d59 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1732423781485/Put/seqid=0 2024-11-24T04:49:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741859_1036 (size=20072) 2024-11-24T04:49:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741859_1036 (size=20072) 2024-11-24T04:49:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741859_1036 (size=20072) 2024-11-24T04:49:46,421 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/9851b7fa40de431baaf2d712bbe64d59 2024-11-24T04:49:46,435 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T04:49:46,435 
INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:46,437 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T04:49:46,437 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T04:49:46,438 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T04:49:46,438 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:46,438 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-11-24T04:49:46,438 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:46,456 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/b9c746f298f5464faaa673971483a8d7 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1732423781514/Put/seqid=0 2024-11-24T04:49:46,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741860_1037 (size=35835) 2024-11-24T04:49:46,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741860_1037 (size=35835) 2024-11-24T04:49:46,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741860_1037 (size=35835) 2024-11-24T04:49:46,465 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/b9c746f298f5464faaa673971483a8d7 2024-11-24T04:49:46,475 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/a/9851b7fa40de431baaf2d712bbe64d59 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/a/9851b7fa40de431baaf2d712bbe64d59 2024-11-24T04:49:46,485 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/a/9851b7fa40de431baaf2d712bbe64d59, entries=246, sequenceid=1508, filesize=19.6 K 2024-11-24T04:49:46,487 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/b9c746f298f5464faaa673971483a8d7 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/b/b9c746f298f5464faaa673971483a8d7 2024-11-24T04:49:46,495 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/b/b9c746f298f5464faaa673971483a8d7, entries=508, sequenceid=1508, filesize=35.0 K 2024-11-24T04:49:46,495 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for d952f2f0a15e631bd0af008bfecd3e30 in 91ms, sequenceid=1508, compaction requested=false; wal=null 2024-11-24T04:49:46,512 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-24T04:49:46,513 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d952f2f0a15e631bd0af008bfecd3e30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-24T04:49:46,521 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/ece94b179da14fd3b760e4cc920c0c61 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1732423781543/Put/seqid=0 2024-11-24T04:49:46,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741861_1038 (size=35082) 2024-11-24T04:49:46,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741861_1038 (size=35082) 2024-11-24T04:49:46,532 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/ece94b179da14fd3b760e4cc920c0c61 2024-11-24T04:49:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741861_1038 (size=35082) 2024-11-24T04:49:46,562 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/066e9cc17a8345d18145806d3d6ec6d3 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1732423781584/Put/seqid=0 2024-11-24T04:49:46,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741862_1039 (size=20825) 2024-11-24T04:49:46,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741862_1039 (size=20825) 2024-11-24T04:49:46,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741862_1039 (size=20825) 2024-11-24T04:49:46,575 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/066e9cc17a8345d18145806d3d6ec6d3 2024-11-24T04:49:46,583 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/b/ece94b179da14fd3b760e4cc920c0c61 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/b/ece94b179da14fd3b760e4cc920c0c61 2024-11-24T04:49:46,592 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/b/ece94b179da14fd3b760e4cc920c0c61, entries=492, sequenceid=2262, filesize=34.3 K 2024-11-24T04:49:46,593 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/066e9cc17a8345d18145806d3d6ec6d3 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/c/066e9cc17a8345d18145806d3d6ec6d3 2024-11-24T04:49:46,601 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/c/066e9cc17a8345d18145806d3d6ec6d3, entries=262, sequenceid=2262, filesize=20.3 K 2024-11-24T04:49:46,602 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for d952f2f0a15e631bd0af008bfecd3e30 in 89ms, sequenceid=2262, compaction requested=false; wal=null 2024-11-24T04:49:46,621 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1732423781648/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 
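The flushed cells above (e.g. key testReplayEditsWrittenIntoWAL/a:100/.../Put) and the "No family for cell ... another family" warning show the shape of the edits being replayed: puts against row "testReplayEditsWrittenIntoWAL" in families a, b and c, plus one edit against a family the table does not declare, which the replay skips. A hedged sketch of how such puts look through the standard client API; the connection, qualifiers and values are illustrative, not taken from the test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplayEditsPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] row = Bytes.toBytes("testReplayEditsWrittenIntoWAL");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("testReplayEditsWrittenIntoWAL"))) {
            // One put per family, mirroring the a:100 / b:100 / c:100 style cells in the flush output.
            for (String family : new String[] { "a", "b", "c" }) {
                Put put = new Put(row);
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("100"), Bytes.toBytes("value-100"));
                table.put(put);
            }
            // A put against an undeclared family is rejected on the normal write path;
            // during WAL replay the equivalent edit is simply skipped with the warning seen above.
        }
    }
}
```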
2024-11-24T04:49:46,625 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 2024-11-24T04:49:46,625 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-24T04:49:46,625 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d952f2f0a15e631bd0af008bfecd3e30 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-11-24T04:49:46,636 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/8e5079ebd6aa447d96a6b4cd5b430b34 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1732423781595/Put/seqid=0 2024-11-24T04:49:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741863_1040 (size=50301) 2024-11-24T04:49:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741863_1040 (size=50301) 2024-11-24T04:49:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741863_1040 (size=50301) 2024-11-24T04:49:46,645 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/8e5079ebd6aa447d96a6b4cd5b430b34 2024-11-24T04:49:46,653 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e5079ebd6aa447d96a6b4cd5b430b34 2024-11-24T04:49:46,655 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/.tmp/c/8e5079ebd6aa447d96a6b4cd5b430b34 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/c/8e5079ebd6aa447d96a6b4cd5b430b34 2024-11-24T04:49:46,662 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e5079ebd6aa447d96a6b4cd5b430b34 2024-11-24T04:49:46,662 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/c/8e5079ebd6aa447d96a6b4cd5b430b34, entries=739, sequenceid=3002, filesize=49.1 K 2024-11-24T04:49:46,663 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for d952f2f0a15e631bd0af008bfecd3e30 in 38ms, sequenceid=3002, compaction requested=false; wal=null 2024-11-24T04:49:46,664 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/0000000000000003002 2024-11-24T04:49:46,665 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:46,665 DEBUG [Time-limited test {}] regionserver.HRegion(1060): 
Cleaning up temporary data for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:46,666 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T04:49:46,668 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d952f2f0a15e631bd0af008bfecd3e30 2024-11-24T04:49:46,670 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenIntoWAL/d952f2f0a15e631bd0af008bfecd3e30/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-11-24T04:49:46,671 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d952f2f0a15e631bd0af008bfecd3e30; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65349965, jitterRate=-0.02620963752269745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T04:49:46,672 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d952f2f0a15e631bd0af008bfecd3e30: Writing region info on filesystem at 1732423785897Initializing all the Stores at 1732423785899 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423785899Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423785900 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423785900Cleaning up temporary data from old regions at 1732423786665 (+765 ms)Region opened successfully at 1732423786672 (+7 ms) 2024-11-24T04:49:46,739 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d952f2f0a15e631bd0af008bfecd3e30, disabling compactions & flushes 2024-11-24T04:49:46,739 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:46,739 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:46,739 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. after waiting 0 ms 2024-11-24T04:49:46,739 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 
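After applying 3001 of the 3002 edits (one skipped for the unknown family), the region deletes the recovered.edits file it consumed and writes a 3002.seqid marker (newMaxSeqId=3002), so the reopened region starts at sequence id 3003. A small sketch of inspecting a region's recovered.edits directory with the plain Hadoop FileSystem API; the path is a hypothetical stand-in for the hdfs://localhost:38973/... region directory in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical region directory; in the log it lives under
        // /hbase/data/default/testReplayEditsWrittenIntoWAL/<encoded region name>.
        Path regionDir = new Path(
            "hdfs://namenode.example:8020/hbase/data/default/testReplayEditsWrittenIntoWAL/region");
        Path recoveredEdits = new Path(regionDir, "recovered.edits");

        FileSystem fs = regionDir.getFileSystem(conf);
        if (fs.exists(recoveredEdits)) {
            // Each entry is either an NNN-wal... edits file still to replay or an NNN.seqid marker.
            for (FileStatus status : fs.listStatus(recoveredEdits)) {
                System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
            }
        }
    }
}
```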
2024-11-24T04:49:46,741 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1732423781347.d952f2f0a15e631bd0af008bfecd3e30. 2024-11-24T04:49:46,741 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d952f2f0a15e631bd0af008bfecd3e30: Waiting for close lock at 1732423786739Disabling compacts and flushes for region at 1732423786739Disabling writes for close at 1732423786739Writing region close event to WAL at 1732423786741 (+2 ms)Closed at 1732423786741 2024-11-24T04:49:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741857_1034 (size=95) 2024-11-24T04:49:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741857_1034 (size=95) 2024-11-24T04:49:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741857_1034 (size=95) 2024-11-24T04:49:46,750 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:46,750 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1732423785875) 2024-11-24T04:49:46,766 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=393 (was 375) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_403786944_22 at /127.0.0.1:33302 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:38973 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_403786944_22 at /127.0.0.1:38136 [Waiting for operation #19] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@61f8e97e[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_403786944_22 at /127.0.0.1:33276 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@4a2c5b84[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@323f31d1[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_403786944_22 at /127.0.0.1:59618 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
java.util.concurrent.ThreadPoolExecutor$Worker@44ebbcb1[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:46383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:38973 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:43375 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=835 (was 755) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=631 (was 568) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=11220 (was 11382) 2024-11-24T04:49:46,780 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=393, OpenFileDescriptor=835, MaxFileDescriptor=1048576, SystemLoadAverage=631, ProcessCount=11, AvailableMemoryMB=11218 2024-11-24T04:49:46,801 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:46,803 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:46,804 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:46,807 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-13296346, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-13296346, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:46,820 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-13296346/hregion-13296346.1732423786808, exclude list is [], retry=0 2024-11-24T04:49:46,823 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:46,823 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:46,824 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:46,866 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-13296346/hregion-13296346.1732423786808 2024-11-24T04:49:46,866 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:46,866 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 3293abd16dc9a9858191e905559dc902, NAME => 'test2727,,1732423786802.3293abd16dc9a9858191e905559dc902.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741865_1042 (size=43) 2024-11-24T04:49:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741865_1042 (size=43) 2024-11-24T04:49:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741865_1042 (size=43) 2024-11-24T04:49:46,879 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1732423786802.3293abd16dc9a9858191e905559dc902.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:46,881 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,883 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3293abd16dc9a9858191e905559dc902 columnFamilyName a 2024-11-24T04:49:46,883 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:46,884 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:46,884 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,886 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3293abd16dc9a9858191e905559dc902 columnFamilyName b 2024-11-24T04:49:46,886 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:46,887 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:46,887 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,890 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3293abd16dc9a9858191e905559dc902 columnFamilyName c 2024-11-24T04:49:46,890 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:46,890 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:46,891 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,891 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,892 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,893 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,893 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,894 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:46,896 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:46,899 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:46,900 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3293abd16dc9a9858191e905559dc902; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67124134, jitterRate=2.275407314300537E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:46,901 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3293abd16dc9a9858191e905559dc902: Writing region info on filesystem at 1732423786880Initializing all the Stores at 1732423786881 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423786881Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423786881Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423786881Cleaning up temporary data from old regions at 1732423786893 (+12 ms)Region opened successfully at 1732423786901 (+8 ms) 2024-11-24T04:49:46,902 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3293abd16dc9a9858191e905559dc902, disabling compactions & flushes 2024-11-24T04:49:46,902 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:46,902 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:46,902 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. after waiting 0 ms 2024-11-24T04:49:46,902 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 
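The region open/close cycle above is driven by the table descriptor printed in the "creating {ENCODED => 3293abd16dc9a9858191e905559dc902 ...}" entry: table test2727 with three column families 'a', 'b' and 'c', each with VERSIONS => '1', BLOOMFILTER => 'ROW' and 64 KB blocks. A minimal sketch of building an equivalent descriptor with the HBase 2.x client API follows; it is illustrative only (not the test's actual code), the class name is invented, and it merely constructs the descriptor, so nothing beyond hbase-client on the classpath is needed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: a descriptor shaped like the one logged for test2727.
public class Test2727DescriptorSketch {
  public static void main(String[] args) {
    TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("test2727"));
    for (String family : new String[] { "a", "b", "c" }) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                 // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
          .build());
    }
    TableDescriptor descriptor = table.build();
    System.out.println(descriptor); // prints a summary comparable to the tableDescriptor line above
  }
}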
2024-11-24T04:49:46,902 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:46,902 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3293abd16dc9a9858191e905559dc902: Waiting for close lock at 1732423786902Disabling compacts and flushes for region at 1732423786902Disabling writes for close at 1732423786902Writing region close event to WAL at 1732423786902Closed at 1732423786902 2024-11-24T04:49:46,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741864_1041 (size=95) 2024-11-24T04:49:46,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741864_1041 (size=95) 2024-11-24T04:49:46,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741864_1041 (size=95) 2024-11-24T04:49:46,909 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:46,909 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-13296346:(num 1732423786808) 2024-11-24T04:49:46,909 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:46,912 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:46,928 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, exclude list is [], retry=0 2024-11-24T04:49:46,931 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:46,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:46,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:46,934 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 2024-11-24T04:49:46,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741866_1043 (size=263359) 2024-11-24T04:49:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741866_1043 (size=263359) 2024-11-24T04:49:47,100 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741866_1043 (size=263359) 2024-11-24T04:49:47,119 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, size=257.2 K (263359bytes) 2024-11-24T04:49:47,119 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 2024-11-24T04:49:47,119 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 after 0ms 2024-11-24T04:49:47,122 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:47,124 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 took 5ms 2024-11-24T04:49:47,129 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1732423786912.temp 2024-11-24T04:49:47,131 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp 2024-11-24T04:49:47,186 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 so closing down 2024-11-24T04:49:47,186 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:47,187 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:47,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741867_1044 (size=263359) 2024-11-24T04:49:47,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741867_1044 (size=263359) 2024-11-24T04:49:47,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741867_1044 (size=263359) 2024-11-24T04:49:47,593 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp (wrote 3000 edits, skipped 0 edits in 44 ms) 2024-11-24T04:49:47,596 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp to hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 2024-11-24T04:49:47,596 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 471 ms; skipped=0; 
WAL=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, size=257.2 K, length=263359, corrupted=false, cancelled=false 2024-11-24T04:49:47,596 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, journal: Splitting hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, size=257.2 K (263359bytes) at 1732423787119Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp at 1732423787131 (+12 ms)Split 1024 edits, skipped 0 edits. at 1732423787147 (+16 ms)Split 2048 edits, skipped 0 edits. at 1732423787164 (+17 ms)Finishing writing output for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 so closing down at 1732423787186 (+22 ms)3 split writer threads finished at 1732423787187 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp (wrote 3000 edits, skipped 0 edits in 44 ms) at 1732423787594 (+407 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000000001-wal.1732423786912.temp to hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 at 1732423787596 (+2 ms)Processed 3000 edits across 1 Regions in 471 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1732423787596 2024-11-24T04:49:47,599 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423786912 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423786912 2024-11-24T04:49:47,601 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 2024-11-24T04:49:47,601 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:47,604 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:47,618 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, exclude list is [], retry=0 2024-11-24T04:49:47,621 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:47,622 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:47,622 DEBUG [TestAsyncWALReplay-pool-0 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:47,624 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 2024-11-24T04:49:47,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:47,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741868_1045 (size=263486) 2024-11-24T04:49:47,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741868_1045 (size=263486) 2024-11-24T04:49:47,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741868_1045 (size=263486) 2024-11-24T04:49:47,793 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, size=257.3 K (263486bytes) 2024-11-24T04:49:47,793 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 2024-11-24T04:49:47,794 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 after 1ms 2024-11-24T04:49:47,797 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:47,799 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 took 6ms 2024-11-24T04:49:47,804 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1732423787604.temp 2024-11-24T04:49:47,814 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp 2024-11-24T04:49:47,860 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 so closing down 2024-11-24T04:49:47,860 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:47,860 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741869_1046 (size=263486) 2024-11-24T04:49:47,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741869_1046 (size=263486) 2024-11-24T04:49:47,863 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741869_1046 (size=263486) 2024-11-24T04:49:47,865 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp (wrote 3000 edits, skipped 0 edits in 36 ms) 2024-11-24T04:49:47,867 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp to hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 2024-11-24T04:49:47,867 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 68 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-11-24T04:49:47,867 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, journal: Splitting hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, size=257.3 K (263486bytes) at 1732423787793Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp at 1732423787814 (+21 ms)Split 1024 edits, skipped 0 edits. at 1732423787819 (+5 ms)Split 2048 edits, skipped 0 edits. at 1732423787840 (+21 ms)Finishing writing output for hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 so closing down at 1732423787860 (+20 ms)3 split writer threads finished at 1732423787860Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp (wrote 3000 edits, skipped 0 edits in 36 ms) at 1732423787865 (+5 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003001-wal.1732423787604.temp to hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 at 1732423787867 (+2 ms)Processed 3000 edits across 1 Regions in 68 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1732423787867 2024-11-24T04:49:47,869 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787604 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423787604 2024-11-24T04:49:47,871 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 2024-11-24T04:49:47,871 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:47,873 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, 
logDir=hdfs://localhost:38973/hbase/WALs/test2727-manual,16010,1732423786800, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:47,888 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787873, exclude list is [], retry=0 2024-11-24T04:49:47,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:47,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:47,893 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:47,895 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1732423786800/wal.1732423787873 2024-11-24T04:49:47,896 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:47,896 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 3293abd16dc9a9858191e905559dc902, NAME => 'test2727,,1732423786802.3293abd16dc9a9858191e905559dc902.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:47,896 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1732423786802.3293abd16dc9a9858191e905559dc902.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:47,896 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,897 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,899 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,900 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
3293abd16dc9a9858191e905559dc902 columnFamilyName a 2024-11-24T04:49:47,900 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:47,901 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:47,901 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,903 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3293abd16dc9a9858191e905559dc902 columnFamilyName b 2024-11-24T04:49:47,903 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:47,904 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:47,904 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,905 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3293abd16dc9a9858191e905559dc902 columnFamilyName c 2024-11-24T04:49:47,906 DEBUG [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:47,906 INFO [StoreOpener-3293abd16dc9a9858191e905559dc902-1 {}] regionserver.HStore(327): Store=3293abd16dc9a9858191e905559dc902/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:47,906 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,907 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,910 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:47,911 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 2024-11-24T04:49:47,914 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:47,964 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 2024-11-24T04:49:47,965 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 2024-11-24T04:49:47,968 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:48,026 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 2024-11-24T04:49:48,026 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3293abd16dc9a9858191e905559dc902 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-11-24T04:49:48,056 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/a/a9d0411fcfb44e6cb4b91c48d5088109 is 41, key is test2727/a:100/1732423787628/Put/seqid=0 2024-11-24T04:49:48,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741871_1048 (size=84227) 2024-11-24T04:49:48,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741871_1048 (size=84227) 2024-11-24T04:49:48,063 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741871_1048 (size=84227) 2024-11-24T04:49:48,063 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/a/a9d0411fcfb44e6cb4b91c48d5088109 2024-11-24T04:49:48,100 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/b/4f68b80658e342ed920307fcd77cff1b is 41, key is test2727/b:100/1732423787683/Put/seqid=0 2024-11-24T04:49:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741872_1049 (size=84609) 2024-11-24T04:49:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741872_1049 (size=84609) 2024-11-24T04:49:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741872_1049 (size=84609) 2024-11-24T04:49:48,106 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/b/4f68b80658e342ed920307fcd77cff1b 2024-11-24T04:49:48,136 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/c/1dabc96712354ed38d6758658a48e2d5 is 41, key is test2727/c:100/1732423787732/Put/seqid=0 2024-11-24T04:49:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741873_1050 (size=84609) 2024-11-24T04:49:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741873_1050 (size=84609) 2024-11-24T04:49:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741873_1050 (size=84609) 2024-11-24T04:49:48,142 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/c/1dabc96712354ed38d6758658a48e2d5 2024-11-24T04:49:48,150 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/a/a9d0411fcfb44e6cb4b91c48d5088109 as hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/a/a9d0411fcfb44e6cb4b91c48d5088109 2024-11-24T04:49:48,157 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/a/a9d0411fcfb44e6cb4b91c48d5088109, entries=2000, sequenceid=6000, filesize=82.3 K 2024-11-24T04:49:48,159 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/b/4f68b80658e342ed920307fcd77cff1b as 
hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/b/4f68b80658e342ed920307fcd77cff1b 2024-11-24T04:49:48,166 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/b/4f68b80658e342ed920307fcd77cff1b, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-24T04:49:48,167 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/.tmp/c/1dabc96712354ed38d6758658a48e2d5 as hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/c/1dabc96712354ed38d6758658a48e2d5 2024-11-24T04:49:48,174 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/c/1dabc96712354ed38d6758658a48e2d5, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-24T04:49:48,175 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 3293abd16dc9a9858191e905559dc902 in 148ms, sequenceid=6000, compaction requested=false; wal=null 2024-11-24T04:49:48,175 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000003000 2024-11-24T04:49:48,176 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/0000000000000006000 2024-11-24T04:49:48,177 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:48,177 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:48,178 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
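A note on the recovered.edits paths used during the split and replay above: each final file name is the highest sequence id it contains, zero-padded to 19 digits (0000000000000003000 for maxSequenceIdInLog=3000, 0000000000000006000 for 6000), while the .temp name starts from the first sequence id plus the source WAL name. The sketch below only reproduces the padded-id part of those names as observed in this log; the class and helper names are invented for illustration.

// Reproduces the 19-digit, zero-padded ids seen in the recovered.edits paths above.
public class RecoveredEditsNameSketch {
  static String paddedSequenceId(long sequenceId) {
    return String.format("%019d", sequenceId); // e.g. 3000 -> 0000000000000003000
  }
  public static void main(String[] args) {
    System.out.println(paddedSequenceId(1L));    // prefix of 0000000000000000001-wal.1732423786912.temp
    System.out.println(paddedSequenceId(3000L)); // 0000000000000003000
    System.out.println(paddedSequenceId(6000L)); // 0000000000000006000
  }
}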
2024-11-24T04:49:48,180 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3293abd16dc9a9858191e905559dc902 2024-11-24T04:49:48,182 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/test2727/3293abd16dc9a9858191e905559dc902/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-11-24T04:49:48,184 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3293abd16dc9a9858191e905559dc902; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62900756, jitterRate=-0.06270569562911987}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:48,186 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3293abd16dc9a9858191e905559dc902: Writing region info on filesystem at 1732423787897Initializing all the Stores at 1732423787898 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423787898Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423787898Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423787898Obtaining lock to block concurrent updates at 1732423788027 (+129 ms)Preparing flush snapshotting stores in 3293abd16dc9a9858191e905559dc902 at 1732423788027Finished memstore snapshotting test2727,,1732423786802.3293abd16dc9a9858191e905559dc902., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1732423788027Flushing stores of test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 
at 1732423788027Flushing 3293abd16dc9a9858191e905559dc902/a: creating writer at 1732423788027Flushing 3293abd16dc9a9858191e905559dc902/a: appending metadata at 1732423788055 (+28 ms)Flushing 3293abd16dc9a9858191e905559dc902/a: closing flushed file at 1732423788055Flushing 3293abd16dc9a9858191e905559dc902/b: creating writer at 1732423788071 (+16 ms)Flushing 3293abd16dc9a9858191e905559dc902/b: appending metadata at 1732423788098 (+27 ms)Flushing 3293abd16dc9a9858191e905559dc902/b: closing flushed file at 1732423788098Flushing 3293abd16dc9a9858191e905559dc902/c: creating writer at 1732423788113 (+15 ms)Flushing 3293abd16dc9a9858191e905559dc902/c: appending metadata at 1732423788135 (+22 ms)Flushing 3293abd16dc9a9858191e905559dc902/c: closing flushed file at 1732423788135Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d799600: reopening flushed file at 1732423788148 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@547c2eaa: reopening flushed file at 1732423788157 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c73e76e: reopening flushed file at 1732423788166 (+9 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 3293abd16dc9a9858191e905559dc902 in 148ms, sequenceid=6000, compaction requested=false; wal=null at 1732423788175 (+9 ms)Cleaning up temporary data from old regions at 1732423788177 (+2 ms)Region opened successfully at 1732423788185 (+8 ms) 2024-11-24T04:49:48,187 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-11-24T04:49:48,187 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3293abd16dc9a9858191e905559dc902, disabling compactions & flushes 2024-11-24T04:49:48,187 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:48,187 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:48,187 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. after waiting 0 ms 2024-11-24T04:49:48,187 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 2024-11-24T04:49:48,188 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1732423786802.3293abd16dc9a9858191e905559dc902. 
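The "Initializing compression context ... hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ" entries above reflect the WAL value compression this TestAsyncWALReplayValueCompression run exercises. The sketch below shows how such a configuration is typically assembled; the property names are quoted from memory for recent HBase 2.x releases and should be treated as assumptions to verify against the version under test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: configuration expected to yield hasValueCompression=true, valueCompressionType=GZ.
public class WalValueCompressionSketch {
  public static Configuration walCompressionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.wal.enablecompression", true); // WAL key/tag compression
    conf.setBoolean("hbase.regionserver.wal.value.compression", true); // WAL value compression (assumed key)
    conf.set("hbase.regionserver.wal.value.compression.type", "GZ");   // GZ as seen in the log (assumed key)
    return conf;
  }
  public static void main(String[] args) {
    Configuration conf = walCompressionConf();
    System.out.println(conf.get("hbase.regionserver.wal.value.compression.type"));
  }
}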
2024-11-24T04:49:48,189 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3293abd16dc9a9858191e905559dc902: Waiting for close lock at 1732423788187Disabling compacts and flushes for region at 1732423788187Disabling writes for close at 1732423788187Writing region close event to WAL at 1732423788188 (+1 ms)Closed at 1732423788188 2024-11-24T04:49:48,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741870_1047 (size=95) 2024-11-24T04:49:48,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741870_1047 (size=95) 2024-11-24T04:49:48,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741870_1047 (size=95) 2024-11-24T04:49:48,195 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:48,195 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1732423787873) 2024-11-24T04:49:48,206 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=397 (was 393) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:38202 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:33276 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:59778 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=897 (was 835) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=631 (was 631), ProcessCount=11 (was 11), AvailableMemoryMB=11006 (was 11218) 2024-11-24T04:49:48,218 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=397, OpenFileDescriptor=897, MaxFileDescriptor=1048576, SystemLoadAverage=631, ProcessCount=11, AvailableMemoryMB=11005 2024-11-24T04:49:48,235 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:48,241 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:48,242 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1732423788242 2024-11-24T04:49:48,249 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 2024-11-24T04:49:48,251 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:48,252 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 8c6781474a781ebad2c10e6f41a23bde, NAME => 'testSequentialEditLogSeqNum,,1732423788236.8c6781474a781ebad2c10e6f41a23bde.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:48,252 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1732423788236.8c6781474a781ebad2c10e6f41a23bde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:48,252 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,253 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,253 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde doesn't exist for region: 8c6781474a781ebad2c10e6f41a23bde on table testSequentialEditLogSeqNum 2024-11-24T04:49:48,254 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 8c6781474a781ebad2c10e6f41a23bde on table testSequentialEditLogSeqNum 2024-11-24T04:49:48,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741875_1052 (size=62) 2024-11-24T04:49:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741875_1052 (size=62) 2024-11-24T04:49:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741875_1052 (size=62) 2024-11-24T04:49:48,265 INFO [StoreOpener-8c6781474a781ebad2c10e6f41a23bde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family a of region 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,267 INFO [StoreOpener-8c6781474a781ebad2c10e6f41a23bde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c6781474a781ebad2c10e6f41a23bde columnFamilyName a 2024-11-24T04:49:48,267 DEBUG [StoreOpener-8c6781474a781ebad2c10e6f41a23bde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,268 INFO [StoreOpener-8c6781474a781ebad2c10e6f41a23bde-1 {}] regionserver.HStore(327): Store=8c6781474a781ebad2c10e6f41a23bde/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,268 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,269 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,269 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,270 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,270 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,272 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8c6781474a781ebad2c10e6f41a23bde 2024-11-24T04:49:48,274 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:48,275 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8c6781474a781ebad2c10e6f41a23bde; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63553544, jitterRate=-0.05297839641571045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:49:48,276 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8c6781474a781ebad2c10e6f41a23bde: Writing region info on filesystem at 1732423788253Initializing all the Stores at 1732423788265 (+12 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788265Cleaning up temporary data from old regions at 1732423788270 (+5 ms)Region opened successfully at 1732423788276 (+6 ms) 2024-11-24T04:49:48,291 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8c6781474a781ebad2c10e6f41a23bde 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-11-24T04:49:48,312 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/.tmp/a/afb3790c6e8b4cddbef8c13ab45fa2ec is 81, key is testSequentialEditLogSeqNum/a:x0/1732423788276/Put/seqid=0 2024-11-24T04:49:48,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741876_1053 (size=5833) 2024-11-24T04:49:48,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741876_1053 (size=5833) 2024-11-24T04:49:48,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741876_1053 (size=5833) 2024-11-24T04:49:48,320 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/.tmp/a/afb3790c6e8b4cddbef8c13ab45fa2ec 2024-11-24T04:49:48,328 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/.tmp/a/afb3790c6e8b4cddbef8c13ab45fa2ec as hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/a/afb3790c6e8b4cddbef8c13ab45fa2ec 2024-11-24T04:49:48,335 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/a/afb3790c6e8b4cddbef8c13ab45fa2ec, entries=10, sequenceid=13, filesize=5.7 K 2024-11-24T04:49:48,337 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 8c6781474a781ebad2c10e6f41a23bde in 47ms, sequenceid=13, compaction requested=false 2024-11-24T04:49:48,337 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 8c6781474a781ebad2c10e6f41a23bde: 2024-11-24T04:49:48,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T04:49:48,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T04:49:48,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T04:49:48,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T04:49:48,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T04:49:48,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741874_1051 (size=1844) 2024-11-24T04:49:48,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741874_1051 (size=1844) 2024-11-24T04:49:48,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741874_1051 (size=1844) 2024-11-24T04:49:48,363 INFO [Time-limited 
test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242, size=1.8 K (1844bytes) 2024-11-24T04:49:48,363 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 2024-11-24T04:49:48,364 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 after 0ms 2024-11-24T04:49:48,366 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:48,367 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 took 4ms 2024-11-24T04:49:48,370 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 so closing down 2024-11-24T04:49:48,370 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:48,370 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1732423788242.temp 2024-11-24T04:49:48,372 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp 2024-11-24T04:49:48,373 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:48,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741877_1054 (size=1477) 2024-11-24T04:49:48,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741877_1054 (size=1477) 2024-11-24T04:49:48,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741877_1054 (size=1477) 2024-11-24T04:49:48,409 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:48,411 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp to hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000020 2024-11-24T04:49:48,411 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 44 ms; skipped=2; 
WAL=hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-11-24T04:49:48,411 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242, journal: Splitting hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242, size=1.8 K (1844bytes) at 1732423788363Finishing writing output for hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242 so closing down at 1732423788370 (+7 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp at 1732423788372 (+2 ms)3 split writer threads finished at 1732423788373 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1732423788409 (+36 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000003-wal.1732423788242.temp to hdfs://localhost:38973/hbase/data/default/testSequentialEditLogSeqNum/8c6781474a781ebad2c10e6f41a23bde/recovered.edits/0000000000000000020 at 1732423788411 (+2 ms)Processed 17 edits across 1 Regions in 44 ms; skipped=2; WAL=hdfs://localhost:38973/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1732423788235/wal.1732423788242, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1732423788411 2024-11-24T04:49:48,425 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=402 (was 397) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:38202 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:33276 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:59778 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=933 (was 897) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=631 (was 631), ProcessCount=11 (was 11), AvailableMemoryMB=10934 (was 11005) 2024-11-24T04:49:48,439 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=402, OpenFileDescriptor=933, MaxFileDescriptor=1048576, SystemLoadAverage=631, ProcessCount=11, AvailableMemoryMB=10933 2024-11-24T04:49:48,460 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:48,462 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:48,463 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:48,466 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-28617768, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-28617768, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:48,478 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-28617768/hregion-28617768.1732423788467, exclude list is [], retry=0 2024-11-24T04:49:48,482 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:48,482 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:48,482 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:48,484 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-28617768/hregion-28617768.1732423788467 2024-11-24T04:49:48,484 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:48,485 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2c966d8fd4b1be3760a6c796e7d24a38, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741879_1056 (size=70) 2024-11-24T04:49:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741879_1056 (size=70) 2024-11-24T04:49:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741879_1056 (size=70) 2024-11-24T04:49:48,525 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:48,530 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,533 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName a 2024-11-24T04:49:48,533 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,534 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,534 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,536 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName b 2024-11-24T04:49:48,536 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,536 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,536 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,538 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName c 2024-11-24T04:49:48,538 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,538 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,539 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,539 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,541 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,542 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,542 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,543 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:48,544 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,546 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:48,547 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c966d8fd4b1be3760a6c796e7d24a38; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70465312, jitterRate=0.05001497268676758}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:48,548 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c966d8fd4b1be3760a6c796e7d24a38: Writing region info on filesystem at 1732423788525Initializing all the Stores at 1732423788526 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788526Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788530 (+4 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788530Cleaning up temporary data from old regions at 1732423788542 (+12 ms)Region opened successfully at 1732423788548 (+6 ms) 2024-11-24T04:49:48,548 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2c966d8fd4b1be3760a6c796e7d24a38, disabling compactions & flushes 2024-11-24T04:49:48,548 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:48,548 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:48,548 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. after waiting 0 ms 2024-11-24T04:49:48,548 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 
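Note: the table descriptor logged above for testRegionMadeOfBulkLoadedFilesOnly (families 'a', 'b' and 'c', each with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536) can be expressed with the public builder API roughly as below. This is a sketch for orientation, not the code the test itself uses.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"));
    for (String family : new String[] { "a", "b", "c" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                 // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
          .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
          .build();
      builder.setColumnFamily(cf);
    }
    return builder.build();
  }
}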
2024-11-24T04:49:48,549 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:48,549 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2c966d8fd4b1be3760a6c796e7d24a38: Waiting for close lock at 1732423788548Disabling compacts and flushes for region at 1732423788548Disabling writes for close at 1732423788548Writing region close event to WAL at 1732423788549 (+1 ms)Closed at 1732423788549 2024-11-24T04:49:48,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741878_1055 (size=95) 2024-11-24T04:49:48,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741878_1055 (size=95) 2024-11-24T04:49:48,552 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-28617768/hregion-28617768.1732423788467 not finished, retry = 0 2024-11-24T04:49:48,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741878_1055 (size=95) 2024-11-24T04:49:48,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:48,658 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-28617768:(num 1732423788467) 2024-11-24T04:49:48,659 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:48,662 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:48,676 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, exclude list is [], retry=0 2024-11-24T04:49:48,679 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:48,680 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:48,680 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:48,682 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 2024-11-24T04:49:48,682 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:48,682 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 2c966d8fd4b1be3760a6c796e7d24a38, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:48,682 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:48,682 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,682 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,684 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,685 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName a 2024-11-24T04:49:48,685 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,685 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,686 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,686 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName b 2024-11-24T04:49:48,686 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,687 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,687 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,688 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName c 2024-11-24T04:49:48,688 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:48,688 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:48,689 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,689 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,690 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,692 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,692 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,692 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
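Note: the FlushLargeStoresPolicy lines above and the flushSizeLowerBound=44739242 value in the open journals fit together arithmetically. With hbase.hregion.percolumnfamilyflush.size.lower.bound unset on the table descriptor, the policy falls back to the region memstore flush size divided by the number of families: 134217728 B / 3 = 44739242 B, which is the ~42.7 MB the log reports. A table can instead pin an explicit lower bound on its descriptor; a hypothetical one-liner (the 16 MB value is arbitrary), where builder is a TableDescriptorBuilder like the one in the sketch above:

// Illustrative: set the per-family flush lower bound to 16 MB for this table.
builder.setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216");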
2024-11-24T04:49:48,694 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:48,695 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c966d8fd4b1be3760a6c796e7d24a38; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74844375, jitterRate=0.11526809632778168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:48,696 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c966d8fd4b1be3760a6c796e7d24a38: Writing region info on filesystem at 1732423788683Initializing all the Stores at 1732423788683Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788684 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788684Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423788684Cleaning up temporary data from old regions at 1732423788692 (+8 ms)Region opened successfully at 1732423788696 (+4 ms) 2024-11-24T04:49:48,700 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1732423788699/Put/seqid=0 2024-11-24T04:49:48,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741881_1058 (size=4826) 2024-11-24T04:49:48,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741881_1058 (size=4826) 2024-11-24T04:49:48,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741881_1058 (size=4826) 2024-11-24T04:49:48,709 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38973/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 2c966d8fd4b1be3760a6c796e7d24a38/a 2024-11-24T04:49:48,716 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-11-24T04:49:48,716 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-24T04:49:48,716 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2c966d8fd4b1be3760a6c796e7d24a38: 2024-11-24T04:49:48,718 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as 
hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/e75852ce477e4db5bbf96225881f4361_SeqId_3_ 2024-11-24T04:49:48,719 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38973/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 2c966d8fd4b1be3760a6c796e7d24a38/a as hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/e75852ce477e4db5bbf96225881f4361_SeqId_3_ - updating store file list. 2024-11-24T04:49:48,724 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for e75852ce477e4db5bbf96225881f4361_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-24T04:49:48,724 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/e75852ce477e4db5bbf96225881f4361_SeqId_3_ into 2c966d8fd4b1be3760a6c796e7d24a38/a 2024-11-24T04:49:48,724 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38973/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 2c966d8fd4b1be3760a6c796e7d24a38/a (new location: hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/e75852ce477e4db5bbf96225881f4361_SeqId_3_) 2024-11-24T04:49:48,764 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, size=0 (0bytes) 2024-11-24T04:49:48,764 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 might be still open, length is 0 2024-11-24T04:49:48,764 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 2024-11-24T04:49:48,765 WARN [IPC Server handler 4 on default port 38973 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-11-24T04:49:48,765 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 after 1ms 2024-11-24T04:49:50,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:59862 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59862 dst: /127.0.0.1:44605 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44605 remote=/127.0.0.1:59862]. Total timeout mills is 60000, 58652 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:50,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:33494 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33494 dst: /127.0.0.1:44795 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
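Editor's note: a few entries back the test writes a standalone HFile under /hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile, HStore validates it against the family and region bounds, commits it into the store directory with a _SeqId_3_ suffix, and updates the store file list. A hedged client-side sketch of the same kind of bulk load using the public tool; the staging directory is a placeholder, and the BulkLoadHFiles API is assumed to match recent HBase 2.x releases rather than the test's own direct HStore calls.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

// Illustrative only: loads pre-written HFiles laid out as <dir>/<family>/<hfile>
// into an existing table, the operation the HStore entries above record.
public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
        Path hfileDir = new Path("hdfs://localhost:38973/hbase/bulkload-staging"); // placeholder path
        BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
    }
}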
2024-11-24T04:49:50,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:38342 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:36429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38342 dst: /127.0.0.1:36429 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:50,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741880_1059 (size=474) 2024-11-24T04:49:50,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741880_1059 (size=474) 2024-11-24T04:49:52,767 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 after 4003ms 2024-11-24T04:49:52,774 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:52,774 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 took 4010ms 2024-11-24T04:49:52,776 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663; continuing. 
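Editor's note: the sequence from the WALSplitter warning ("might be still open, length is 0") through "Recovered lease, attempt=1 ... after 4003ms" is the usual recovery path for a WAL that was never cleanly closed: the splitter asks the NameNode to recover the lease, the abandoned writer's DataXceiver pipelines are torn down (the ERROR stack traces above are a side effect of that), and after one retry the file becomes readable. A minimal sketch of the underlying HDFS call, assuming the filesystem is a DistributedFileSystem; the path is copied from the log and the simple polling loop stands in for HBase's RecoverLeaseFSUtils logic, it is not that code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative lease recovery for an un-closed WAL file.
public class LeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("hdfs://localhost:38973/hbase/WALs/"
            + "testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663");
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(wal.toUri(), conf);
        // recoverLease returns true once the NameNode has closed the file on behalf of the dead writer.
        while (!dfs.recoverLease(wal)) {
            Thread.sleep(1000); // the log above shows success on attempt=1 after roughly 4 seconds
        }
    }
}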
2024-11-24T04:49:52,776 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 so closing down 2024-11-24T04:49:52,776 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:52,778 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1732423788663.temp 2024-11-24T04:49:52,780 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp 2024-11-24T04:49:52,780 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741882_1060 (size=259) 2024-11-24T04:49:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741882_1060 (size=259) 2024-11-24T04:49:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741882_1060 (size=259) 2024-11-24T04:49:52,798 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:52,799 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp to hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 2024-11-24T04:49:52,799 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 25 ms; skipped=1; WAL=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, size=0, length=0, corrupted=false, cancelled=false 2024-11-24T04:49:52,799 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, journal: Splitting hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, size=0 (0bytes) at 1732423788764Finishing writing output for hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 so closing down at 1732423792776 (+4012 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp at 1732423792780 (+4 ms)3 split writer threads finished at 1732423792780Closed recovered edits writer 
path=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1732423792798 (+18 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005-wal.1732423788663.temp to hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 at 1732423792799 (+1 ms)Processed 2 edits across 1 Regions in 25 ms; skipped=1; WAL=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663, size=0, length=0, corrupted=false, cancelled=false at 1732423792799 2024-11-24T04:49:52,801 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423788663 2024-11-24T04:49:52,802 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 2024-11-24T04:49:52,802 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:52,804 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:52,817 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423792804, exclude list is [], retry=0 2024-11-24T04:49:52,820 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:52,820 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:52,821 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:52,822 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423792804 2024-11-24T04:49:52,823 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:52,823 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c966d8fd4b1be3760a6c796e7d24a38, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:52,823 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:52,823 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,823 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,825 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,826 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName a 2024-11-24T04:49:52,826 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:52,833 DEBUG [StoreFileOpener-2c966d8fd4b1be3760a6c796e7d24a38-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for e75852ce477e4db5bbf96225881f4361_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-24T04:49:52,833 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/e75852ce477e4db5bbf96225881f4361_SeqId_3_ 2024-11-24T04:49:52,834 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:52,834 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,835 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName b 2024-11-24T04:49:52,835 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:52,835 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:52,835 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,836 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c966d8fd4b1be3760a6c796e7d24a38 columnFamilyName c 2024-11-24T04:49:52,836 DEBUG [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:52,837 INFO [StoreOpener-2c966d8fd4b1be3760a6c796e7d24a38-1 {}] regionserver.HStore(327): Store=2c966d8fd4b1be3760a6c796e7d24a38/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:52,837 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,838 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,839 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,840 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 2024-11-24T04:49:52,842 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:52,844 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 2024-11-24T04:49:52,844 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2c966d8fd4b1be3760a6c796e7d24a38 3/3 column families, dataSize=58 B heapSize=904 B 2024-11-24T04:49:52,863 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/.tmp/a/aef842d0c4e24249aa196923216d4426 is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1732423788728/Put/seqid=0 2024-11-24T04:49:52,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741884_1062 (size=5149) 2024-11-24T04:49:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741884_1062 (size=5149) 2024-11-24T04:49:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741884_1062 (size=5149) 2024-11-24T04:49:52,872 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/.tmp/a/aef842d0c4e24249aa196923216d4426 2024-11-24T04:49:52,878 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/.tmp/a/aef842d0c4e24249aa196923216d4426 as hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/aef842d0c4e24249aa196923216d4426 2024-11-24T04:49:52,884 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/a/aef842d0c4e24249aa196923216d4426, entries=1, sequenceid=5, filesize=5.0 K 2024-11-24T04:49:52,885 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 2c966d8fd4b1be3760a6c796e7d24a38 in 41ms, sequenceid=5, compaction requested=false; wal=null 2024-11-24T04:49:52,886 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/0000000000000000005 2024-11-24T04:49:52,887 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,887 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,888 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:52,889 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2c966d8fd4b1be3760a6c796e7d24a38 2024-11-24T04:49:52,892 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/2c966d8fd4b1be3760a6c796e7d24a38/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-11-24T04:49:52,893 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c966d8fd4b1be3760a6c796e7d24a38; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63552959, jitterRate=-0.05298711359500885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:52,893 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c966d8fd4b1be3760a6c796e7d24a38: Writing region info on filesystem at 1732423792823Initializing all the Stores at 1732423792824 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423792825 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423792825Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423792825Obtaining lock to block concurrent updates at 1732423792844 (+19 ms)Preparing flush snapshotting stores in 2c966d8fd4b1be3760a6c796e7d24a38 at 1732423792844Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1732423792844Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 
at 1732423792844Flushing 2c966d8fd4b1be3760a6c796e7d24a38/a: creating writer at 1732423792844Flushing 2c966d8fd4b1be3760a6c796e7d24a38/a: appending metadata at 1732423792863 (+19 ms)Flushing 2c966d8fd4b1be3760a6c796e7d24a38/a: closing flushed file at 1732423792863Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ab5b372: reopening flushed file at 1732423792877 (+14 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 2c966d8fd4b1be3760a6c796e7d24a38 in 41ms, sequenceid=5, compaction requested=false; wal=null at 1732423792885 (+8 ms)Cleaning up temporary data from old regions at 1732423792887 (+2 ms)Region opened successfully at 1732423792893 (+6 ms) 2024-11-24T04:49:52,897 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2c966d8fd4b1be3760a6c796e7d24a38, disabling compactions & flushes 2024-11-24T04:49:52,897 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:52,897 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:52,897 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. after waiting 0 ms 2024-11-24T04:49:52,897 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 2024-11-24T04:49:52,898 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1732423788461.2c966d8fd4b1be3760a6c796e7d24a38. 
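Editor's note: the replay above picks up recovered.edits/0000000000000000005, applies its single edit at sequence id 5, flushes it into family a, deletes the recovered-edits file and records the new max sequence id in 5.seqid. As the split entries earlier show, the file name is simply the first sequence id of the split output, zero-padded to 19 digits, written first with a "-<wal name>.temp" suffix and renamed on close. A small sketch of that naming convention, using the values from this run; illustrative only.

// Recovered-edits naming as seen in the split/replay entries above.
public class RecoveredEditsNameSketch {
    public static void main(String[] args) {
        long firstSeqIdInSplitOutput = 5L;
        String walName = "wal.1732423788663";
        // 19-digit zero-padded sequence id, e.g. 0000000000000000005
        String finalName = String.format("%019d", firstSeqIdInSplitOutput);
        // The open split writer carries a "-<walName>.temp" suffix until it is closed and renamed.
        String tempName = finalName + "-" + walName + ".temp";
        System.out.println(tempName + " -> " + finalName);
    }
}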
2024-11-24T04:49:52,898 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2c966d8fd4b1be3760a6c796e7d24a38: Waiting for close lock at 1732423792897Disabling compacts and flushes for region at 1732423792897Disabling writes for close at 1732423792897Writing region close event to WAL at 1732423792898 (+1 ms)Closed at 1732423792898 2024-11-24T04:49:52,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741883_1061 (size=95) 2024-11-24T04:49:52,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741883_1061 (size=95) 2024-11-24T04:49:52,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741883_1061 (size=95) 2024-11-24T04:49:52,903 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:52,904 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1732423792804) 2024-11-24T04:49:52,917 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=404 (was 402) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:38973 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1699016780_22 at /127.0.0.1:36914 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:38973 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1699016780_22 at /127.0.0.1:40746 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1699016780_22 at /127.0.0.1:57520 [Waiting for operation #3] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=993 (was 933) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=580 (was 631), ProcessCount=11 (was 11), AvailableMemoryMB=10918 (was 10933) 2024-11-24T04:49:52,931 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=404, OpenFileDescriptor=993, MaxFileDescriptor=1048576, SystemLoadAverage=580, ProcessCount=11, AvailableMemoryMB=10914 2024-11-24T04:49:52,945 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:52,950 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T04:49:52,954 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 4464c5b832df,34701,1732423776004 2024-11-24T04:49:52,956 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77a4078 2024-11-24T04:49:52,957 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T04:49:52,959 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T04:49:52,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T04:49:52,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-11-24T04:49:52,970 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T04:49:52,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-11-24T04:49:52,973 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:52,975 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T04:49:52,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T04:49:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741885_1063 (size=694) 2024-11-24T04:49:52,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741885_1063 (size=694) 2024-11-24T04:49:52,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741885_1063 (size=694) 2024-11-24T04:49:52,988 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f 2024-11-24T04:49:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741886_1064 (size=77) 2024-11-24T04:49:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741886_1064 (size=77) 2024-11-24T04:49:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741886_1064 (size=77) 2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:52,999 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
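Editor's note: the HMaster entries above record a client create-table request for testReplayEditsAfterRegionMovedWithMultiCF with families cf1 and cf2 (one version, no bloom filter) and the CreateTableProcedure states that follow (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS). A hedged sketch of the client call that produces such a request; the connection setup is a placeholder and the builder chain reflects the standard HBase 2.x admin API, not the test's own helper code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative client-side equivalent of the create request logged by the master above.
public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
                .setRegionReplication(1);                    // REGION_REPLICATION => '1'
            for (String family : new String[] { "cf1", "cf2" }) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)                       // VERSIONS => '1'
                    .setBloomFilterType(BloomType.NONE)      // BLOOMFILTER => 'NONE'
                    .build());
            }
            admin.createTable(table.build());                // master stores the CreateTableProcedure
        }
    }
}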
2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:52,999 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:52,999 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423792999Disabling compacts and flushes for region at 1732423792999Disabling writes for close at 1732423792999Writing region close event to WAL at 1732423792999Closed at 1732423792999 2024-11-24T04:49:53,001 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T04:49:53,005 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1732423793001"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732423793001"}]},"ts":"1732423793001"} 2024-11-24T04:49:53,009 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
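Editor's note: "Added 1 regions to meta" corresponds to the Put shown just above, which writes the region's info:regioninfo and info:state cells into hbase:meta keyed by the full region name. A small sketch of reading that row back from a client, assuming the usual catalog family and qualifier constants; the row key is copied from the log, and null checks are kept minimal for brevity.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative read of the hbase:meta row populated by the CreateTableProcedure above.
public class MetaRowSketch {
    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes(
            "testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            Result r = meta.get(new Get(row));
            byte[] regionInfo = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            byte[] state = r.getValue(HConstants.CATALOG_FAMILY, Bytes.toBytes("state"));
            System.out.println("regioninfo present=" + (regionInfo != null)
                + ", state=" + (state == null ? "<none>" : Bytes.toString(state)));
        }
    }
}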
2024-11-24T04:49:53,010 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T04:49:53,013 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732423793010"}]},"ts":"1732423793010"} 2024-11-24T04:49:53,017 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-11-24T04:49:53,017 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4464c5b832df=0} racks are {/default-rack=0} 2024-11-24T04:49:53,018 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-24T04:49:53,018 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-24T04:49:53,019 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-24T04:49:53,019 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-24T04:49:53,019 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-24T04:49:53,019 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-24T04:49:53,019 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-24T04:49:53,019 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-24T04:49:53,019 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-24T04:49:53,019 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-24T04:49:53,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN}] 2024-11-24T04:49:53,022 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN 2024-11-24T04:49:53,024 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN; state=OFFLINE, location=4464c5b832df,39021,1732423777669; forceNewPlan=false, retain=false 2024-11-24T04:49:53,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T04:49:53,180 INFO [4464c5b832df:34701 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
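Editor's note: once the balancer picks a server, the TransitRegionStateProcedure entries below move the region through OPENING toward the chosen region server (4464c5b832df,39021 in this run). A hedged sketch of how a client can observe where the region landed after assignment completes; the row key is arbitrary because this table has a single region spanning the whole key space.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative check of the region's post-assignment location (cf. the OPENING entry below).
public class RegionLocationSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
            // Any row key works: the single region has empty start and end keys.
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true /* reload */);
            System.out.println(loc.getRegion().getEncodedName() + " is on " + loc.getServerName());
        }
    }
}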
2024-11-24T04:49:53,181 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPENING, regionLocation=4464c5b832df,39021,1732423777669 2024-11-24T04:49:53,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN because future has completed 2024-11-24T04:49:53,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669}] 2024-11-24T04:49:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T04:49:53,341 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T04:49:53,344 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49425, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T04:49:53,352 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,353 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:53,353 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,354 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:53,354 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,354 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,356 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,358 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:53,358 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:53,359 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:53,360 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,361 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:53,361 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:53,362 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:53,362 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,363 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,364 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,365 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,365 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,366 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-24T04:49:53,368 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,371 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:53,371 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7fab4f06c042c658eda5a15104ff7acf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69596542, jitterRate=0.03706929087638855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-24T04:49:53,372 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,372 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7fab4f06c042c658eda5a15104ff7acf: Running coprocessor pre-open hook at 1732423793354Writing region info on filesystem at 1732423793354Initializing all the Stores at 1732423793355 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423793356 (+1 ms)Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423793356Cleaning up temporary data from old regions at 1732423793365 (+9 ms)Running coprocessor post-open hooks at 1732423793372 (+7 ms)Region opened successfully at 1732423793372 2024-11-24T04:49:53,374 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., pid=6, masterSystemTime=1732423793340 2024-11-24T04:49:53,377 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,377 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,378 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPEN, openSeqNum=2, regionLocation=4464c5b832df,39021,1732423777669 2024-11-24T04:49:53,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 because future has completed 2024-11-24T04:49:53,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T04:49:53,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 in 198 msec 2024-11-24T04:49:53,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T04:49:53,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN in 368 msec 2024-11-24T04:49:53,394 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T04:49:53,394 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732423793394"}]},"ts":"1732423793394"} 2024-11-24T04:49:53,397 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-11-24T04:49:53,399 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T04:49:53,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 435 msec 2024-11-24T04:49:53,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T04:49:53,607 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table 
testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-11-24T04:49:53,607 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-11-24T04:49:53,610 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T04:49:53,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-11-24T04:49:53,618 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T04:49:53,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-11-24T04:49:53,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=2] 2024-11-24T04:49:53,634 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T04:49:53,637 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T04:49:53,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=7fab4f06c042c658eda5a15104ff7acf, source=4464c5b832df,39021,1732423777669, destination=4464c5b832df,39197,1732423777462, warming up region on 4464c5b832df,39197,1732423777462 2024-11-24T04:49:53,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T04:49:53,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=7fab4f06c042c658eda5a15104ff7acf, source=4464c5b832df,39021,1732423777669, destination=4464c5b832df,39197,1732423777462, running balancer 2024-11-24T04:49:53,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42337, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T04:49:53,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE 2024-11-24T04:49:53,662 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE 2024-11-24T04:49:53,665 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=CLOSING, regionLocation=4464c5b832df,39021,1732423777669 2024-11-24T04:49:53,666 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(7855): Warmup {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:53,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:53,668 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE because future has completed 2024-11-24T04:49:53,669 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:53,669 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:53,670 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-24T04:49:53,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669}] 2024-11-24T04:49:53,670 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:53,670 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,671 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:53,671 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:53,672 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:53,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,673 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
2024-11-24T04:49:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423793672Disabling compacts and flushes for region at 1732423793672Disabling writes for close at 1732423793672Writing region close event to WAL at 1732423793673 (+1 ms)Closed at 1732423793673 2024-11-24T04:49:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-11-24T04:49:53,833 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,833 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-24T04:49:53,835 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:53,835 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,835 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,835 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:53,835 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
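[Editor's note, illustrative only] The flush that follows (cf1, key r1/cf1:q, sequenceid=5) persists the single row the client wrote after the location lookup for row 'r1' at 04:49:53,633 above. Continuing inside the try-block of the earlier sketch, that write would be issued roughly as below; the cell value is an assumption, since the log records only its size (31 B).

    // Continuing the sketch above (same conn); additional imports:
    // org.apache.hadoop.hbase.client.Table and org.apache.hadoop.hbase.client.Put.
    try (Table table = conn.getTable(
        TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      Put put = new Put(Bytes.toBytes("r1"));                                       // row key from the flush below
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v1")); // value "v1" is assumed
      table.put(put);  // this is the edit flushed to .tmp/cf1/... at sequenceid=5 below
    }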
2024-11-24T04:49:53,836 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 7fab4f06c042c658eda5a15104ff7acf 2/2 column families, dataSize=31 B heapSize=616 B 2024-11-24T04:49:53,858 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/6cdb62ad5c604d0c9c1b84c3011802ae is 35, key is r1/cf1:q/1732423793637/Put/seqid=0 2024-11-24T04:49:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741887_1065 (size=4783) 2024-11-24T04:49:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741887_1065 (size=4783) 2024-11-24T04:49:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741887_1065 (size=4783) 2024-11-24T04:49:53,866 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/6cdb62ad5c604d0c9c1b84c3011802ae 2024-11-24T04:49:53,874 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/6cdb62ad5c604d0c9c1b84c3011802ae as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae 2024-11-24T04:49:53,882 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae, entries=1, sequenceid=5, filesize=4.7 K 2024-11-24T04:49:53,883 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 7fab4f06c042c658eda5a15104ff7acf in 48ms, sequenceid=5, compaction requested=false 2024-11-24T04:49:53,884 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-11-24T04:49:53,889 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-11-24T04:49:53,892 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:53,892 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423793834Running coprocessor pre-close hooks at 1732423793834Disabling compacts and flushes for region at 1732423793834Disabling writes for close at 1732423793835 (+1 ms)Obtaining lock to block concurrent updates at 1732423793836 (+1 ms)Preparing flush snapshotting stores in 7fab4f06c042c658eda5a15104ff7acf at 1732423793836Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1732423793837 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. at 1732423793838 (+1 ms)Flushing 7fab4f06c042c658eda5a15104ff7acf/cf1: creating writer at 1732423793838Flushing 7fab4f06c042c658eda5a15104ff7acf/cf1: appending metadata at 1732423793858 (+20 ms)Flushing 7fab4f06c042c658eda5a15104ff7acf/cf1: closing flushed file at 1732423793858Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a08449: reopening flushed file at 1732423793873 (+15 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 7fab4f06c042c658eda5a15104ff7acf in 48ms, sequenceid=5, compaction requested=false at 1732423793884 (+11 ms)Writing region close event to WAL at 1732423793885 (+1 ms)Running coprocessor post-close hooks at 1732423793890 (+5 ms)Closed at 1732423793892 (+2 ms) 2024-11-24T04:49:53,893 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 7fab4f06c042c658eda5a15104ff7acf move to 4464c5b832df,39197,1732423777462 record at close sequenceid=5 2024-11-24T04:49:53,896 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:53,897 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=CLOSED 2024-11-24T04:49:53,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 because future has completed 2024-11-24T04:49:53,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T04:49:53,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 in 232 msec 2024-11-24T04:49:53,907 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE; 
state=CLOSED, location=4464c5b832df,39197,1732423777462; forceNewPlan=false, retain=false 2024-11-24T04:49:54,058 INFO [4464c5b832df:34701 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-24T04:49:54,058 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPENING, regionLocation=4464c5b832df,39197,1732423777462 2024-11-24T04:49:54,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE because future has completed 2024-11-24T04:49:54,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462}] 2024-11-24T04:49:54,224 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,224 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:54,225 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,225 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:54,225 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,226 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,228 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,230 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:54,230 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:54,238 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae 2024-11-24T04:49:54,238 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:54,238 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,239 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:54,239 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:54,240 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:54,240 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,241 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,243 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,244 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,244 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,245 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-24T04:49:54,246 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,247 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 7fab4f06c042c658eda5a15104ff7acf; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63829294, jitterRate=-0.04886940121650696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-24T04:49:54,247 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,248 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 7fab4f06c042c658eda5a15104ff7acf: Running coprocessor pre-open hook at 1732423794226Writing region info on filesystem at 1732423794226Initializing all the Stores at 1732423794228 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423794228Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423794228Cleaning up temporary data from old regions at 1732423794244 (+16 ms)Running coprocessor post-open hooks at 1732423794248 (+4 ms)Region opened successfully at 1732423794248 2024-11-24T04:49:54,250 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., pid=9, masterSystemTime=1732423794216 2024-11-24T04:49:54,252 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): 
Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,252 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,253 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPEN, openSeqNum=9, regionLocation=4464c5b832df,39197,1732423777462 2024-11-24T04:49:54,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 because future has completed 2024-11-24T04:49:54,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-24T04:49:54,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 in 196 msec 2024-11-24T04:49:54,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE in 600 msec 2024-11-24T04:49:54,267 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T04:49:54,269 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T04:49:54,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:44476 deadline: 1732423854272, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39197 startCode=1732423777462. As of locationSeqNum=5. 2024-11-24T04:49:54,278 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39197 startCode=1732423777462. As of locationSeqNum=5. 2024-11-24T04:49:54,279 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39197 startCode=1732423777462. As of locationSeqNum=5. 
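[Editor's note, illustrative only] Pids 7-9 above record the region being moved from 4464c5b832df,39021 to 4464c5b832df,39197, after which the client's next mutation is answered with RegionMovedException and the location cache is refreshed to seqNum=5 automatically; no client-side handling is needed. Issued through the public Admin API rather than the test's internal helpers, the move would look roughly like this (encoded region name and destination server taken from the log):

    // Continuing the sketch above (same admin); additional import: org.apache.hadoop.hbase.ServerName.
    byte[] encodedRegion = Bytes.toBytes("7fab4f06c042c658eda5a15104ff7acf");
    ServerName dest = ServerName.valueOf("4464c5b832df", 39197, 1732423777462L);  // host, port, startcode from the log
    admin.move(encodedRegion, dest);  // triggers the REOPEN/MOVE TransitRegionStateProcedure (pid=7)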
2024-11-24T04:49:54,279 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39197 startCode=1732423777462. As of locationSeqNum=5. 2024-11-24T04:49:54,385 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T04:49:54,387 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T04:49:54,398 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7fab4f06c042c658eda5a15104ff7acf 2/2 column families, dataSize=50 B heapSize=720 B 2024-11-24T04:49:54,420 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/241df528528a423795811cf66d9f808e is 29, key is r1/cf1:/1732423794388/DeleteFamily/seqid=0 2024-11-24T04:49:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741888_1066 (size=4906) 2024-11-24T04:49:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741888_1066 (size=4906) 2024-11-24T04:49:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741888_1066 (size=4906) 2024-11-24T04:49:54,428 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,434 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,449 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 is 29, key is r1/cf2:/1732423794388/DeleteFamily/seqid=0 2024-11-24T04:49:54,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741889_1067 (size=4906) 2024-11-24T04:49:54,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741889_1067 (size=4906) 2024-11-24T04:49:54,457 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), 
to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741889_1067 (size=4906) 2024-11-24T04:49:54,463 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,464 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/241df528528a423795811cf66d9f808e as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,471 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,471 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e, entries=1, sequenceid=12, filesize=4.8 K 2024-11-24T04:49:54,472 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,478 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,478 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1, entries=1, sequenceid=12, filesize=4.8 K 2024-11-24T04:49:54,479 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 7fab4f06c042c658eda5a15104ff7acf in 81ms, sequenceid=12, compaction requested=false 2024-11-24T04:49:54,479 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7fab4f06c042c658eda5a15104ff7acf: 2024-11-24T04:49:54,482 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-24T04:49:54,483 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7fab4f06c042c658eda5a15104ff7acf/cf1 is initiating major compaction (all files) 2024-11-24T04:49:54,484 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput 
configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T04:49:54,484 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:54,484 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7fab4f06c042c658eda5a15104ff7acf/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,485 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae, hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e] into tmpdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp, totalSize=9.5 K 2024-11-24T04:49:54,486 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6cdb62ad5c604d0c9c1b84c3011802ae, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732423793637 2024-11-24T04:49:54,486 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 241df528528a423795811cf66d9f808e, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-24T04:49:54,498 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 7fab4f06c042c658eda5a15104ff7acf#cf1#compaction#16 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T04:49:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741890_1068 (size=4626) 2024-11-24T04:49:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741890_1068 (size=4626) 2024-11-24T04:49:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741890_1068 (size=4626) 2024-11-24T04:49:54,512 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf1/18c6e766a4aa4e1da931add6686d18b3 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/18c6e766a4aa4e1da931add6686d18b3 2024-11-24T04:49:54,525 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 7fab4f06c042c658eda5a15104ff7acf/cf1 of 7fab4f06c042c658eda5a15104ff7acf into 18c6e766a4aa4e1da931add6686d18b3(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
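[Editor's note, illustrative only] The entries from 04:49:54,398 onward record a whole-row delete of r1 (DeleteFamily markers in cf1 and cf2), a flush at sequenceid=12, and a major compaction of cf1; the cf2 compaction follows below. The test drives these steps through internal region handles ("Time-limited test"), but the comparable client-side calls would be roughly:

    // Continuing the sketch above (same conn/admin); additional import: org.apache.hadoop.hbase.client.Delete.
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Table table = conn.getTable(tn)) {
      table.delete(new Delete(Bytes.toBytes("r1")));  // no family specified => one DeleteFamily marker per family
    }
    admin.flush(tn);         // persists the markers, like the sequenceid=12 flush above
    admin.majorCompact(tn);  // asynchronous request; the log's compactions were invoked synchronously in-process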
2024-11-24T04:49:54,526 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7fab4f06c042c658eda5a15104ff7acf: 2024-11-24T04:49:54,526 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T04:49:54,526 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7fab4f06c042c658eda5a15104ff7acf/cf2 is initiating major compaction (all files) 2024-11-24T04:49:54,526 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T04:49:54,526 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T04:49:54,526 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7fab4f06c042c658eda5a15104ff7acf/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,526 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1] into tmpdir=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp, totalSize=4.8 K 2024-11-24T04:49:54,527 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5a1888758f674d1c9ee9bfbf38b31ea1, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-24T04:49:54,532 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 7fab4f06c042c658eda5a15104ff7acf#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T04:49:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741891_1069 (size=4592) 2024-11-24T04:49:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741891_1069 (size=4592) 2024-11-24T04:49:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741891_1069 (size=4592) 2024-11-24T04:49:54,545 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/.tmp/cf2/37dc9b07af424e129e835f8b590bdcc6 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/37dc9b07af424e129e835f8b590bdcc6 2024-11-24T04:49:54,552 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 7fab4f06c042c658eda5a15104ff7acf/cf2 of 7fab4f06c042c658eda5a15104ff7acf into 37dc9b07af424e129e835f8b590bdcc6(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T04:49:54,552 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7fab4f06c042c658eda5a15104ff7acf: 2024-11-24T04:49:54,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=7fab4f06c042c658eda5a15104ff7acf, source=4464c5b832df,39197,1732423777462, destination=4464c5b832df,39021,1732423777669, warming up region on 4464c5b832df,39021,1732423777669 2024-11-24T04:49:54,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=7fab4f06c042c658eda5a15104ff7acf, source=4464c5b832df,39197,1732423777462, destination=4464c5b832df,39021,1732423777669, running balancer 2024-11-24T04:49:54,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE 2024-11-24T04:49:54,558 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE 2024-11-24T04:49:54,560 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=CLOSING, regionLocation=4464c5b832df,39197,1732423777462 2024-11-24T04:49:54,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
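At this point the master has received a region move request and stored a TransitRegionStateProcedure (REOPEN/MOVE, pid=10) for it. The log shows the server-side handling; from a client's point of view, such a move is normally issued with Admin.move. The sketch below is illustrative only: the class name is mine, the table and destination server name are taken from the log, and it assumes the table has a single region (as in this test).

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MoveRegionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
      // Look up the table's regions; this test table has exactly one.
      List<RegionInfo> regions = admin.getRegions(table);
      RegionInfo region = regions.get(0);
      // Destination in "host,port,startcode" form, as it appears in the log.
      ServerName destination = ServerName.valueOf("4464c5b832df,39021,1732423777669");
      // Asks the master to close the region on its current server and reopen it on
      // the destination, which is exactly the REOPEN/MOVE procedure seen in the log.
      admin.move(region.getEncodedNameAsBytes(), destination);
    }
  }
}
```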
2024-11-24T04:49:54,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(7855): Warmup {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:54,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:54,561 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,562 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:54,562 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:54,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE because future has completed 2024-11-24T04:49:54,563 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-24T04:49:54,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462}] 2024-11-24T04:49:54,570 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/18c6e766a4aa4e1da931add6686d18b3 2024-11-24T04:49:54,575 INFO [StoreFileOpener-7fab4f06c042c658eda5a15104ff7acf-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,575 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,580 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae 2024-11-24T04:49:54,580 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:54,581 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,582 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:54,582 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:54,589 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/37dc9b07af424e129e835f8b590bdcc6 2024-11-24T04:49:54,593 INFO [StoreFileOpener-7fab4f06c042c658eda5a15104ff7acf-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,594 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,594 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:54,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1722): Closing 
7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:54,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:54,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39021 {}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423794594Disabling compacts and flushes for region at 1732423794594Disabling writes for close at 1732423794594Writing region close event to WAL at 1732423794595 (+1 ms)Closed at 1732423794595 2024-11-24T04:49:54,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-11-24T04:49:54,717 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,717 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-24T04:49:54,717 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:54,718 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,718 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,718 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:54,718 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
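The region has now been closed on the source server 4464c5b832df,39197,1732423777462 by the UnassignRegionHandler. A test or operator can confirm where an encoded region is currently online by listing the regions of a server via the Admin API. This is a small helper sketch under my own naming, not code from the test; it takes an already-open Admin instance.

```java
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public class RegionPlacementCheck {
  // Returns true if the region with the given encoded name is currently open on the server.
  static boolean isRegionOnServer(Admin admin, ServerName server, String encodedName)
      throws Exception {
    List<RegionInfo> onlineRegions = admin.getRegions(server);
    return onlineRegions.stream()
        .anyMatch(r -> r.getEncodedName().equals(encodedName));
  }
}
```

Called with the source server name after the close above, this would be expected to return false for 7fab4f06c042c658eda5a15104ff7acf, and true once the open on the destination completes.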
2024-11-24T04:49:54,718 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae, hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e] to archive 2024-11-24T04:49:54,721 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T04:49:54,725 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/6cdb62ad5c604d0c9c1b84c3011802ae 2024-11-24T04:49:54,727 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/241df528528a423795811cf66d9f808e 2024-11-24T04:49:54,739 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1] to archive 2024-11-24T04:49:54,740 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
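The HFileArchiver entries show that compacted-away store files are not deleted but moved under the cluster's archive directory. The archived files can be inspected with the plain Hadoop FileSystem API; the sketch below is mine, with the archive path copied from the log (it will differ on any other cluster).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Archive location for cf1 of this region, taken verbatim from the log above.
    Path archiveDir = new Path("hdfs://localhost:38973/user/jenkins/test-data/"
        + "60c886e5-4a56-341a-3188-c60331fa7e6f/archive/data/default/"
        + "testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1");
    FileSystem fs = archiveDir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(archiveDir)) {
      // Each entry is an HFile that was compacted away and then archived instead of deleted.
      System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
    }
  }
}
```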
2024-11-24T04:49:54,742 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/5a1888758f674d1c9ee9bfbf38b31ea1 2024-11-24T04:49:54,748 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-11-24T04:49:54,749 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:54,749 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423794717Running coprocessor pre-close hooks at 1732423794717Disabling compacts and flushes for region at 1732423794717Disabling writes for close at 1732423794718 (+1 ms)Writing region close event to WAL at 1732423794744 (+26 ms)Running coprocessor post-close hooks at 1732423794749 (+5 ms)Closed at 1732423794749 2024-11-24T04:49:54,749 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 7fab4f06c042c658eda5a15104ff7acf move to 4464c5b832df,39021,1732423777669 record at close sequenceid=12 2024-11-24T04:49:54,751 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:54,752 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=CLOSED 2024-11-24T04:49:54,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 because future has completed 2024-11-24T04:49:54,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-24T04:49:54,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 in 193 msec 2024-11-24T04:49:54,760 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE; state=CLOSED, location=4464c5b832df,39021,1732423777669; forceNewPlan=false, retain=false 2024-11-24T04:49:54,910 INFO 
[4464c5b832df:34701 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-24T04:49:54,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPENING, regionLocation=4464c5b832df,39021,1732423777669 2024-11-24T04:49:54,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE because future has completed 2024-11-24T04:49:54,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669}] 2024-11-24T04:49:55,072 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,072 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:55,073 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,073 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:55,073 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,073 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,076 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,077 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:55,077 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:55,085 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/18c6e766a4aa4e1da931add6686d18b3 2024-11-24T04:49:55,085 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:55,085 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,086 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:55,086 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:55,094 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/37dc9b07af424e129e835f8b590bdcc6 2024-11-24T04:49:55,094 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:55,094 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,096 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,097 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,098 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,098 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,099 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-24T04:49:55,101 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,102 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 7fab4f06c042c658eda5a15104ff7acf; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67325519, jitterRate=0.003228411078453064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-24T04:49:55,102 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,103 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 7fab4f06c042c658eda5a15104ff7acf: Running coprocessor pre-open hook at 1732423795074Writing region info on filesystem at 1732423795074Initializing all the Stores at 1732423795075 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423795075Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423795076 (+1 ms)Cleaning up temporary data from old regions at 1732423795098 (+22 ms)Running coprocessor post-open hooks at 1732423795102 (+4 ms)Region opened successfully at 1732423795103 (+1 ms) 2024-11-24T04:49:55,104 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): 
Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., pid=12, masterSystemTime=1732423795067 2024-11-24T04:49:55,107 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,107 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,108 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPEN, openSeqNum=18, regionLocation=4464c5b832df,39021,1732423777669 2024-11-24T04:49:55,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 because future has completed 2024-11-24T04:49:55,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-24T04:49:55,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39021,1732423777669 in 197 msec 2024-11-24T04:49:55,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, REOPEN/MOVE in 558 msec 2024-11-24T04:49:55,160 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T04:49:55,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T04:49:55,163 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 4464c5b832df,39021,1732423777669: testing ***** 2024-11-24T04:49:55,163 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-24T04:49:55,165 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-24T04:49:55,166 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-24T04:49:55,170 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-24T04:49:55,172 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-24T04:49:55,183 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 325403336 }, "NonHeapMemoryUsage": { "committed": 172097536, 
"init": 7667712, "max": -1, "used": 169477560 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "4464c5b832df", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2068, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 0, "ProcessCallTime_max": 10, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 2, "ProcessCallTime_median": 5, "ProcessCallTime_75th_percentile": 7, "ProcessCallTime_90th_percentile": 9, "ProcessCallTime_95th_percentile": 9, "ProcessCallTime_98th_percentile": 9, "ProcessCallTime_99th_percentile": 9, "ProcessCallTime_99.9th_percentile": 9, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 11, "TotalCallTime_mean": 5, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 6, "TotalCallTime_75th_percentile": 8, "TotalCallTime_90th_percentile": 10, "TotalCallTime_95th_percentile": 10, "TotalCallTime_98th_percentile": 10, "TotalCallTime_99th_percentile": 10, "TotalCallTime_99.9th_percentile": 10, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, "ResponseSize_SizeRangeCount_0-10": 8, 
"exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, "RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "4464c5b832df", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:55024", "tag.serverName": "4464c5b832df,39197,1732423777462", "tag.clusterId": "8c0792d3-101b-420e-9c9e-da22fc7d020f", "tag.Context": "regionserver", "tag.Hostname": "4464c5b832df", "regionCount": 0, "storeCount": 0, "hlogFileCount": 1, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1732423777462, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, 
"bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, 
"CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, 
"GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, 
"ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, 
"CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, 
"Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-11-24T04:49:55,187 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34701 {}] master.MasterRpcServices(700): 4464c5b832df,39021,1732423777669 reported a fatal error: ***** ABORTING region server 4464c5b832df,39021,1732423777669: testing ***** 2024-11-24T04:49:55,189 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4464c5b832df,39021,1732423777669' ***** 2024-11-24T04:49:55,189 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-11-24T04:49:55,190 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(3091): Received CLOSE for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(956): aborting server 4464c5b832df,39021,1732423777669 2024-11-24T04:49:55,190 INFO [RS:2;4464c5b832df:39021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T04:49:55,191 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:49:55,191 INFO [RS:2;4464c5b832df:39021 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;4464c5b832df:39021. 2024-11-24T04:49:55,191 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39197 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:44152 deadline: 1732423855191, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39021 startCode=1732423777669. As of locationSeqNum=12. 2024-11-24T04:49:55,191 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
2024-11-24T04:49:55,191 DEBUG [RS:2;4464c5b832df:39021 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T04:49:55,191 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:49:55,191 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,191 DEBUG [RS:2;4464c5b832df:39021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:49:55,192 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39021 startCode=1732423777669. As of locationSeqNum=12. 2024-11-24T04:49:55,192 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T04:49:55,192 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39021 startCode=1732423777669. As of locationSeqNum=12. 
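The entries above show the async client catching a RegionMovedException, refreshing its cached location for 7fab4f06c042c658eda5a15104ff7acf, and carrying on; application code never sees the exception. A minimal client-side sketch, assuming a reachable cluster and reusing the table, row and column family names that appear in this log; the retry settings are illustrative values, not settings used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetAfterRegionMove {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Bound how long the client keeps retrying while regions move around (illustrative values).
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // If the cached location is stale, the RegionMovedException from the old server is
      // handled inside the client: the location cache is refreshed and the Get is retried.
      Result result = table.get(new Get(Bytes.toBytes("r1")).addFamily(Bytes.toBytes("cf1")));
      System.out.println("row r1 present: " + !result.isEmpty());
    }
  }
}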
2024-11-24T04:49:55,192 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1325): Online Regions={7fab4f06c042c658eda5a15104ff7acf=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.} 2024-11-24T04:49:55,192 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4464c5b832df port=39021 startCode=1732423777669. As of locationSeqNum=12. 2024-11-24T04:49:55,192 DEBUG [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1351): Waiting on 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:55,193 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,193 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423795190Running coprocessor pre-close hooks at 1732423795191 (+1 ms)Disabling compacts and flushes for region at 1732423795191Disabling writes for close at 1732423795191Writing region close event to WAL at 1732423795193 (+2 ms)Running coprocessor post-close hooks at 1732423795193Closed at 1732423795193 2024-11-24T04:49:55,194 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:55,203 INFO [regionserver/4464c5b832df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T04:49:55,203 INFO [regionserver/4464c5b832df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T04:49:55,227 INFO [regionserver/4464c5b832df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:49:55,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4464c5b832df,39021,1732423777669 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T04:49:55,307 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4464c5b832df,39021,1732423777669 aborting 2024-11-24T04:49:55,307 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4464c5b832df,39021,1732423777669 aborting 2024-11-24T04:49:55,307 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=12 from cache 2024-11-24T04:49:55,393 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(976): stopping server 4464c5b832df,39021,1732423777669; all regions closed. 2024-11-24T04:49:55,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741835_1011 (size=1405) 2024-11-24T04:49:55,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741835_1011 (size=1405) 2024-11-24T04:49:55,398 DEBUG [RS:2;4464c5b832df:39021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:49:55,398 INFO [RS:2;4464c5b832df:39021 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:49:55,398 INFO [RS:2;4464c5b832df:39021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T04:49:55,399 INFO [RS:2;4464c5b832df:39021 {}] hbase.ChoreService(370): Chore service for: regionserver/4464c5b832df:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T04:49:55,399 INFO [regionserver/4464c5b832df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T04:49:55,399 INFO [RS:2;4464c5b832df:39021 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T04:49:55,399 INFO [RS:2;4464c5b832df:39021 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T04:49:55,400 INFO [RS:2;4464c5b832df:39021 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T04:49:55,400 INFO [RS:2;4464c5b832df:39021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T04:49:55,400 INFO [RS:2;4464c5b832df:39021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39021 2024-11-24T04:49:55,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4464c5b832df,39021,1732423777669 2024-11-24T04:49:55,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T04:49:55,467 INFO [RS:2;4464c5b832df:39021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T04:49:55,468 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4464c5b832df,39021,1732423777669] 2024-11-24T04:49:55,490 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4464c5b832df,39021,1732423777669 already deleted, retry=false 2024-11-24T04:49:55,490 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 4464c5b832df,39021,1732423777669 on 4464c5b832df,34701,1732423776004 2024-11-24T04:49:55,496 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 4464c5b832df,39021,1732423777669, splitWal=true, meta=false 2024-11-24T04:49:55,498 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for 4464c5b832df,39021,1732423777669 (carryingMeta=false) 4464c5b832df,39021,1732423777669/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@453077c0[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-11-24T04:49:55,500 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 4464c5b832df,39021,1732423777669, splitWal=true, meta=false 2024-11-24T04:49:55,503 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(207): 4464c5b832df,39021,1732423777669 had 1 regions 2024-11-24T04:49:55,505 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 4464c5b832df,39021,1732423777669, splitWal=true, meta=false, isMeta: false 2024-11-24T04:49:55,507 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting 2024-11-24T04:49:55,508 INFO [PEWorker-1 {}] master.SplitWALManager(105): 4464c5b832df,39021,1732423777669 WAL count=1, meta=false 2024-11-24T04:49:55,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 4464c5b832df%2C39021%2C1732423777669.1732423779373}] 2024-11-24T04:49:55,517 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=4464c5b832df,39197,1732423777462 2024-11-24T04:49:55,519 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18] 2024-11-24T04:49:55,520 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 4464c5b832df:39021 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4464c5b832df/172.17.0.2:39021 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:55,521 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18, error=java.net.ConnectException: Call to address=4464c5b832df:39021 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4464c5b832df/172.17.0.2:39021 2024-11-24T04:49:55,521 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 is java.net.ConnectException: Connection refused 2024-11-24T04:49:55,521 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 from cache 2024-11-24T04:49:55,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 4464c5b832df%2C39021%2C1732423777669.1732423779373, worker=4464c5b832df,39197,1732423777462}] 2024-11-24T04:49:55,522 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address 4464c5b832df:39021 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4464c5b832df/172.17.0.2:39021 2024-11-24T04:49:55,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:49:55,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39021-0x1016b2cef8a0003, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:49:55,581 INFO [RS:2;4464c5b832df:39021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T04:49:55,582 INFO [RS:2;4464c5b832df:39021 {}] regionserver.HRegionServer(1031): Exiting; stopping=4464c5b832df,39021,1732423777669; zookeeper connection closed. 
2024-11-24T04:49:55,582 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20b6ee5f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20b6ee5f 2024-11-24T04:49:55,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39197 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-11-24T04:49:55,706 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373, size=1.4 K (1405bytes) 2024-11-24T04:49:55,706 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 2024-11-24T04:49:55,707 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 after 1ms 2024-11-24T04:49:55,710 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:55,710 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 took 4ms 2024-11-24T04:49:55,716 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 7fab4f06c042c658eda5a15104ff7acf: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-11-24T04:49:55,716 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 so closing down 2024-11-24T04:49:55,716 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:55,716 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:55,717 INFO [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions 
in 6 ms; skipped=6; WAL=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373, size=1.4 K, length=1405, corrupted=false, cancelled=false 2024-11-24T04:49:55,717 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373, journal: Splitting hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373, size=1.4 K (1405bytes) at 1732423795706Finishing writing output for hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 so closing down at 1732423795716 (+10 ms)3 split writer threads finished at 1732423795716Processed 6 edits across 0 Regions in 6 ms; skipped=6; WAL=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373, size=1.4 K, length=1405, corrupted=false, cancelled=false at 1732423795717 (+1 ms) 2024-11-24T04:49:55,717 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 2024-11-24T04:49:55,718 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4464c5b832df:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-24T04:49:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-11-24T04:49:55,724 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting/4464c5b832df%2C39021%2C1732423777669.1732423779373 to hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs 2024-11-24T04:49:55,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-24T04:49:55,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 4464c5b832df%2C39021%2C1732423777669.1732423779373, worker=4464c5b832df,39197,1732423777462 in 203 msec 2024-11-24T04:49:55,728 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=4464c5b832df,39197,1732423777462 2024-11-24T04:49:55,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T04:49:55,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 4464c5b832df%2C39021%2C1732423777669.1732423779373, worker=4464c5b832df,39197,1732423777462 in 218 msec 2024-11-24T04:49:55,733 INFO [PEWorker-1 {}] master.SplitLogManager(171): 
hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting dir is empty, no logs to split. 2024-11-24T04:49:55,733 INFO [PEWorker-1 {}] master.SplitWALManager(105): 4464c5b832df,39021,1732423777669 WAL count=0, meta=false 2024-11-24T04:49:55,733 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if 4464c5b832df,39021,1732423777669 WAL splitting is done? wals=0, meta=false 2024-11-24T04:49:55,735 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for 4464c5b832df,39021,1732423777669 failed, ignore...File hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,39021,1732423777669-splitting does not exist. 2024-11-24T04:49:55,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN}] 2024-11-24T04:49:55,740 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN 2024-11-24T04:49:55,741 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-24T04:49:55,830 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18] 2024-11-24T04:49:55,831 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to 4464c5b832df:39021 this server is in the failed servers list 2024-11-24T04:49:55,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=4464c5b832df:39021 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 4464c5b832df:39021 2024-11-24T04:49:55,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 4464c5b832df:39021 2024-11-24T04:49:55,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39021,1732423777669, seqNum=18 from cache 2024-11-24T04:49:55,892 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(204): Hosts are {4464c5b832df=0} racks are {/default-rack=0} 2024-11-24T04:49:55,893 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-24T04:49:55,893 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-24T04:49:55,893 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-24T04:49:55,893 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-24T04:49:55,893 INFO [4464c5b832df:34701 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-24T04:49:55,893 INFO [4464c5b832df:34701 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-24T04:49:55,893 DEBUG [4464c5b832df:34701 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-24T04:49:55,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPENING, regionLocation=4464c5b832df,39197,1732423777462 2024-11-24T04:49:55,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN because future has completed 2024-11-24T04:49:55,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462}] 2024-11-24T04:49:56,057 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
2024-11-24T04:49:56,057 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 7fab4f06c042c658eda5a15104ff7acf, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:56,057 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,058 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,058 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,058 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,059 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,060 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf1 2024-11-24T04:49:56,060 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,067 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf1/18c6e766a4aa4e1da931add6686d18b3 2024-11-24T04:49:56,067 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,067 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,068 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fab4f06c042c658eda5a15104ff7acf columnFamilyName cf2 2024-11-24T04:49:56,069 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,075 DEBUG [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/cf2/37dc9b07af424e129e835f8b590bdcc6 2024-11-24T04:49:56,075 INFO [StoreOpener-7fab4f06c042c658eda5a15104ff7acf-1 {}] regionserver.HStore(327): Store=7fab4f06c042c658eda5a15104ff7acf/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,075 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,077 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,078 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,079 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,079 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,079 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
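The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the lower bound falls back to the region's memstore flush size divided by the number of families. Below is a sketch of declaring that bound explicitly when building the table descriptor; the 16 MB figure is an arbitrary example, not a value taken from this test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBound {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        // Per-family flush lower bound consulted by FlushLargeStoresPolicy (16 MB, arbitrary).
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}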
2024-11-24T04:49:56,081 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,082 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 7fab4f06c042c658eda5a15104ff7acf; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71460621, jitterRate=0.0648462325334549}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-24T04:49:56,082 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:49:56,083 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 7fab4f06c042c658eda5a15104ff7acf: Running coprocessor pre-open hook at 1732423796058Writing region info on filesystem at 1732423796058Initializing all the Stores at 1732423796059 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796059Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796059Cleaning up temporary data from old regions at 1732423796079 (+20 ms)Running coprocessor post-open hooks at 1732423796082 (+3 ms)Region opened successfully at 1732423796083 (+1 ms) 2024-11-24T04:49:56,084 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., pid=17, masterSystemTime=1732423796052 2024-11-24T04:49:56,087 DEBUG [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:49:56,087 INFO [RS_OPEN_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
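With the region reopened on 4464c5b832df,39197 at next sequenceid=18 and the post-open deploy tasks finished, hbase:meta is updated next and clients pick up the new location. The reassignment can also be confirmed from a client's region locator; a minimal sketch, assuming a reachable cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // After the reassignment above, the table's single region should report the
      // surviving region server as its current location.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName() + " -> " + location.getServerName());
      }
    }
  }
}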
2024-11-24T04:49:56,088 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=7fab4f06c042c658eda5a15104ff7acf, regionState=OPEN, openSeqNum=18, regionLocation=4464c5b832df,39197,1732423777462 2024-11-24T04:49:56,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 because future has completed 2024-11-24T04:49:56,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-11-24T04:49:56,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 7fab4f06c042c658eda5a15104ff7acf, server=4464c5b832df,39197,1732423777462 in 191 msec 2024-11-24T04:49:56,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-11-24T04:49:56,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=7fab4f06c042c658eda5a15104ff7acf, ASSIGN in 355 msec 2024-11-24T04:49:56,096 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server 4464c5b832df,39021,1732423777669 after splitting done 2024-11-24T04:49:56,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 4464c5b832df,39021,1732423777669, splitWal=true, meta=false in 605 msec 2024-11-24T04:49:56,349 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf., hostname=4464c5b832df,39197,1732423777462, seqNum=18] 2024-11-24T04:49:56,368 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=400 (was 404), OpenFileDescriptor=1023 (was 993) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=580 (was 580), ProcessCount=11 (was 11), AvailableMemoryMB=10875 (was 10914) 2024-11-24T04:49:56,384 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=400, OpenFileDescriptor=1023, MaxFileDescriptor=1048576, SystemLoadAverage=580, ProcessCount=11, AvailableMemoryMB=10874 2024-11-24T04:49:56,401 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:56,403 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:56,404 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:56,406 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-14717629, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-14717629, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:56,419 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-14717629/hregion-14717629.1732423796407, exclude list is [], retry=0 2024-11-24T04:49:56,422 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:56,422 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:56,423 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:56,424 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-14717629/hregion-14717629.1732423796407 2024-11-24T04:49:56,426 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:56,427 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 1d76f7395cfcb966641451dd399e7c72, NAME => 'testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 
'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:56,434 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-11-24T04:49:56,434 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:56,436 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-11-24T04:49:56,436 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:56,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741893_1071 (size=67) 2024-11-24T04:49:56,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741893_1071 (size=67) 2024-11-24T04:49:56,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741893_1071 (size=67) 2024-11-24T04:49:56,437 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-11-24T04:49:56,437 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-11-24T04:49:56,437 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,438 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,440 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName a 2024-11-24T04:49:56,440 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,441 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,441 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,442 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName b 2024-11-24T04:49:56,442 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,443 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,443 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,445 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 
columnFamilyName c 2024-11-24T04:49:56,445 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,445 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,445 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,446 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,446 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,448 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,448 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,448 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:56,449 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,451 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:56,452 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1d76f7395cfcb966641451dd399e7c72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63304164, jitterRate=-0.05669444799423218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:56,453 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1d76f7395cfcb966641451dd399e7c72: Writing region info on filesystem at 1732423796437Initializing all the Stores at 1732423796438 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796438Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796438Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796438Cleaning up temporary data from old regions at 1732423796448 (+10 ms)Region opened successfully at 1732423796452 (+4 ms) 2024-11-24T04:49:56,453 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1d76f7395cfcb966641451dd399e7c72, disabling compactions & flushes 2024-11-24T04:49:56,453 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,453 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,453 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. after waiting 0 ms 2024-11-24T04:49:56,453 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,453 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,454 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1d76f7395cfcb966641451dd399e7c72: Waiting for close lock at 1732423796453Disabling compacts and flushes for region at 1732423796453Disabling writes for close at 1732423796453Writing region close event to WAL at 1732423796453Closed at 1732423796453 2024-11-24T04:49:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741892_1070 (size=95) 2024-11-24T04:49:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741892_1070 (size=95) 2024-11-24T04:49:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741892_1070 (size=95) 2024-11-24T04:49:56,459 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:56,459 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-14717629:(num 1732423796407) 2024-11-24T04:49:56,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:56,461 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:56,472 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, exclude list is [], retry=0 2024-11-24T04:49:56,475 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 
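
The desiredMaxFileSize/jitterRate pairs printed in the split-policy summaries above and below are consistent with a fixed base file size plus a truncated jittered offset, i.e. desiredMaxFileSize = base + (long)(base * jitterRate). A minimal sketch of that arithmetic, assuming a 64 MB (67108864-byte) base region max file size for this test run; the base value is inferred from the logged numbers rather than stated anywhere in this log, and the class, constant and method names below are illustrative only:

// Illustrative sketch only: reproduces the logged desiredMaxFileSize values from an
// assumed 64 MB base file size and the jitterRate printed by the split policy.
public class SplitSizeJitterSketch {
    // Assumed base max file size for this test run (inferred from the numbers, not logged).
    static final long BASE_MAX_FILE_SIZE = 67_108_864L; // 64 MB

    static long desiredMaxFileSize(double jitterRate) {
        // apparent relationship: base plus the truncated jittered offset
        return BASE_MAX_FILE_SIZE + (long) (BASE_MAX_FILE_SIZE * jitterRate);
    }

    public static void main(String[] args) {
        System.out.println(desiredMaxFileSize(-0.05669444799423218));  // 63304164 (first open above)
        System.out.println(desiredMaxFileSize(-0.09596312046051025));  // 60668888 (reopen below)
        System.out.println(desiredMaxFileSize(-0.006891340017318726)); // 66646394 (open after replay)
    }
}

Running it reproduces the three values logged for this region's successive opens: 63304164, 60668888 and 66646394.
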
2024-11-24T04:49:56,476 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:56,476 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:56,477 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 2024-11-24T04:49:56,478 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1d76f7395cfcb966641451dd399e7c72, NAME => 'testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,479 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,480 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName a 2024-11-24T04:49:56,480 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,481 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,481 INFO 
[StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,482 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName b 2024-11-24T04:49:56,482 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,482 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,482 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,483 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName c 2024-11-24T04:49:56,483 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,483 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,483 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,484 DEBUG [Time-limited test {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,485 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,486 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,486 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,487 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:56,488 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,489 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1d76f7395cfcb966641451dd399e7c72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60668888, jitterRate=-0.09596312046051025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:56,489 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1d76f7395cfcb966641451dd399e7c72: Writing region info on filesystem at 1732423796478Initializing all the Stores at 1732423796479 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796479Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796479Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796479Cleaning up temporary data from old regions at 1732423796486 (+7 ms)Region opened successfully at 1732423796489 (+3 ms) 2024-11-24T04:49:56,517 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1d76f7395cfcb966641451dd399e7c72 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-24T04:49:56,532 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/a/2ceb4dd7d7e04148b88d920e3e4c5f9b is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1732423796490/Put/seqid=0 2024-11-24T04:49:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44795 is added to blk_1073741895_1073 (size=5958) 2024-11-24T04:49:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741895_1073 (size=5958) 2024-11-24T04:49:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741895_1073 (size=5958) 2024-11-24T04:49:56,539 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/a/2ceb4dd7d7e04148b88d920e3e4c5f9b 2024-11-24T04:49:56,557 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3763ccfd38854936b05eedca6f0c710e is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1732423796498/Put/seqid=0 2024-11-24T04:49:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741896_1074 (size=5958) 2024-11-24T04:49:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741896_1074 (size=5958) 2024-11-24T04:49:56,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741896_1074 (size=5958) 2024-11-24T04:49:56,566 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3763ccfd38854936b05eedca6f0c710e 2024-11-24T04:49:56,584 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/c/8b7c3a18b88b4567949a0fa3dc9efc2d is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1732423796506/Put/seqid=0 2024-11-24T04:49:56,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741897_1075 (size=5958) 2024-11-24T04:49:56,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741897_1075 (size=5958) 2024-11-24T04:49:56,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741897_1075 (size=5958) 2024-11-24T04:49:56,593 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/c/8b7c3a18b88b4567949a0fa3dc9efc2d 2024-11-24T04:49:56,599 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/a/2ceb4dd7d7e04148b88d920e3e4c5f9b as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/a/2ceb4dd7d7e04148b88d920e3e4c5f9b 2024-11-24T04:49:56,604 INFO [Time-limited test {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/a/2ceb4dd7d7e04148b88d920e3e4c5f9b, entries=10, sequenceid=33, filesize=5.8 K 2024-11-24T04:49:56,606 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3763ccfd38854936b05eedca6f0c710e as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3763ccfd38854936b05eedca6f0c710e 2024-11-24T04:49:56,611 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3763ccfd38854936b05eedca6f0c710e, entries=10, sequenceid=33, filesize=5.8 K 2024-11-24T04:49:56,612 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/c/8b7c3a18b88b4567949a0fa3dc9efc2d as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/c/8b7c3a18b88b4567949a0fa3dc9efc2d 2024-11-24T04:49:56,618 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/c/8b7c3a18b88b4567949a0fa3dc9efc2d, entries=10, sequenceid=33, filesize=5.8 K 2024-11-24T04:49:56,619 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 1d76f7395cfcb966641451dd399e7c72 in 102ms, sequenceid=33, compaction requested=false 2024-11-24T04:49:56,620 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1d76f7395cfcb966641451dd399e7c72: 2024-11-24T04:49:56,620 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1d76f7395cfcb966641451dd399e7c72, disabling compactions & flushes 2024-11-24T04:49:56,620 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,620 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,620 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. after waiting 0 ms 2024-11-24T04:49:56,620 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 2024-11-24T04:49:56,621 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 
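
The "Committing ... .tmp/a/... as .../a/..." and "Added ..., entries=10, sequenceid=33, filesize=5.8 K" lines above reflect a write-to-temp-then-rename commit: each flushed store file is fully written under the region's .tmp directory and only then moved into the column family directory, so readers never observe a partially written file. A generic sketch of that pattern against the Hadoop FileSystem API, reusing paths from the log; the helper class is hypothetical and this is not HBase's HRegionFileSystem implementation (it also assumes the log's NameNode address and a hadoop-client dependency on the classpath):

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Generic sketch of the flush-commit pattern visible in the "Committing ... as ..." lines.
public class CommitStoreFileSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38973"), new Configuration());
        Path region = new Path("/hbase/data/default/testReplayEditsWrittenViaHRegion/"
                + "1d76f7395cfcb966641451dd399e7c72");
        commit(fs, new Path(region, ".tmp/a/2ceb4dd7d7e04148b88d920e3e4c5f9b"),
               new Path(region, "a/2ceb4dd7d7e04148b88d920e3e4c5f9b"));
    }

    // Move a completely written temp file into its final store directory.
    static void commit(FileSystem fs, Path tmp, Path dst) throws IOException {
        fs.mkdirs(dst.getParent());   // ensure the family directory exists
        if (!fs.rename(tmp, dst)) {   // a single rename, so no partial file is ever visible
            throw new IOException("Failed to commit " + tmp + " to " + dst);
        }
    }
}
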
2024-11-24T04:49:56,621 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1d76f7395cfcb966641451dd399e7c72: Waiting for close lock at 1732423796620Disabling compacts and flushes for region at 1732423796620Disabling writes for close at 1732423796620Writing region close event to WAL at 1732423796621 (+1 ms)Closed at 1732423796621 2024-11-24T04:49:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741894_1072 (size=3385) 2024-11-24T04:49:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741894_1072 (size=3385) 2024-11-24T04:49:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741894_1072 (size=3385) 2024-11-24T04:49:56,629 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3763ccfd38854936b05eedca6f0c710e to hdfs://localhost:38973/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3763ccfd38854936b05eedca6f0c710e 2024-11-24T04:49:56,645 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, size=3.3 K (3385bytes) 2024-11-24T04:49:56,645 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 2024-11-24T04:49:56,645 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 after 0ms 2024-11-24T04:49:56,647 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:56,647 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 took 3ms 2024-11-24T04:49:56,650 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 so closing down 2024-11-24T04:49:56,650 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:49:56,650 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1732423796461.temp 2024-11-24T04:49:56,651 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp 2024-11-24T04:49:56,652 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:49:56,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36429 is added to blk_1073741898_1076 (size=2944) 2024-11-24T04:49:56,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741898_1076 (size=2944) 2024-11-24T04:49:56,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741898_1076 (size=2944) 2024-11-24T04:49:56,663 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-24T04:49:56,665 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 2024-11-24T04:49:56,665 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, size=3.3 K, length=3385, corrupted=false, cancelled=false 2024-11-24T04:49:56,665 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, journal: Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, size=3.3 K (3385bytes) at 1732423796645Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 so closing down at 1732423796650 (+5 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp at 1732423796651 (+1 ms)3 split writer threads finished at 1732423796652 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1732423796663 (+11 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000003-wal.1732423796461.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 at 1732423796665 (+2 ms)Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461, size=3.3 K, length=3385, corrupted=false, cancelled=false at 1732423796665 2024-11-24T04:49:56,667 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796461 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423796461 2024-11-24T04:49:56,668 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 2024-11-24T04:49:56,668 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:56,669 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:56,681 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796670, exclude list is [], retry=0 2024-11-24T04:49:56,684 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:56,685 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:56,685 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:56,686 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796670 2024-11-24T04:49:56,687 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:49:56,687 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1d76f7395cfcb966641451dd399e7c72, NAME => 'testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:56,687 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,688 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,688 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,691 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,693 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName a 2024-11-24T04:49:56,693 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,699 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/a/2ceb4dd7d7e04148b88d920e3e4c5f9b 2024-11-24T04:49:56,699 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,699 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,701 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName b 2024-11-24T04:49:56,701 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,701 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,701 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,702 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d76f7395cfcb966641451dd399e7c72 columnFamilyName c 2024-11-24T04:49:56,702 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,709 DEBUG [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/c/8b7c3a18b88b4567949a0fa3dc9efc2d 2024-11-24T04:49:56,709 INFO [StoreOpener-1d76f7395cfcb966641451dd399e7c72-1 {}] regionserver.HStore(327): Store=1d76f7395cfcb966641451dd399e7c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,709 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,710 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,712 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,712 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 2024-11-24T04:49:56,715 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:49:56,717 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 2024-11-24T04:49:56,717 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1d76f7395cfcb966641451dd399e7c72 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-24T04:49:56,738 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3b564333235f490cba27a0b5671479be is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1732423796498/Put/seqid=0 2024-11-24T04:49:56,744 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741900_1078 (size=5958) 2024-11-24T04:49:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741900_1078 (size=5958) 2024-11-24T04:49:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741900_1078 (size=5958) 2024-11-24T04:49:56,745 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3b564333235f490cba27a0b5671479be 2024-11-24T04:49:56,752 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/.tmp/b/3b564333235f490cba27a0b5671479be as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3b564333235f490cba27a0b5671479be 2024-11-24T04:49:56,766 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/b/3b564333235f490cba27a0b5671479be, entries=10, sequenceid=32, filesize=5.8 K 2024-11-24T04:49:56,767 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 1d76f7395cfcb966641451dd399e7c72 in 49ms, sequenceid=32, compaction requested=false; wal=null 2024-11-24T04:49:56,768 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/0000000000000000032 2024-11-24T04:49:56,769 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,769 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,770 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
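
The FlushLargeStoresPolicy entry that closes the block above is straightforward arithmetic: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the per-family flush lower bound is the region memstore flush size divided by the number of column families, which is where both the "42.7 M" figure and the flushSizeLowerBound=44739242 printed in the split-policy summaries come from. A minimal sketch, assuming the default 128 MB (134217728-byte) hbase.hregion.memstore.flush.size, which this log does not state explicitly:

// Illustrative arithmetic only: how the "42.7 M" per-family lower bound and
// flushSizeLowerBound=44739242 fall out of flushSize / numFamilies.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // assumed default hbase.hregion.memstore.flush.size (128 MB)
        int families = 3;                      // column families a, b and c of this test table
        long lowerBound = memstoreFlushSize / families;
        System.out.printf("flushSizeLowerBound=%d (~%.1f MB)%n",
                lowerBound, lowerBound / (1024.0 * 1024.0));
        // prints: flushSizeLowerBound=44739242 (~42.7 MB)
    }
}
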
2024-11-24T04:49:56,772 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1d76f7395cfcb966641451dd399e7c72 2024-11-24T04:49:56,774 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/1d76f7395cfcb966641451dd399e7c72/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-11-24T04:49:56,776 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1d76f7395cfcb966641451dd399e7c72; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66646394, jitterRate=-0.006891340017318726}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:56,777 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1d76f7395cfcb966641451dd399e7c72: Writing region info on filesystem at 1732423796688Initializing all the Stores at 1732423796689 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796689Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796691 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796691Obtaining lock to block concurrent updates at 1732423796718 (+27 ms)Preparing flush snapshotting stores in 1d76f7395cfcb966641451dd399e7c72 at 1732423796718Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1732423796718Flushing stores of testReplayEditsWrittenViaHRegion,,1732423796402.1d76f7395cfcb966641451dd399e7c72. 
at 1732423796718Flushing 1d76f7395cfcb966641451dd399e7c72/b: creating writer at 1732423796718Flushing 1d76f7395cfcb966641451dd399e7c72/b: appending metadata at 1732423796737 (+19 ms)Flushing 1d76f7395cfcb966641451dd399e7c72/b: closing flushed file at 1732423796737Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4eff5816: reopening flushed file at 1732423796751 (+14 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 1d76f7395cfcb966641451dd399e7c72 in 49ms, sequenceid=32, compaction requested=false; wal=null at 1732423796767 (+16 ms)Cleaning up temporary data from old regions at 1732423796769 (+2 ms)Region opened successfully at 1732423796777 (+8 ms) 2024-11-24T04:49:56,804 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=407 (was 400) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57690 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:36914 [Waiting for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:37074 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) 
app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:40904 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:40746 [Waiting for operation #38] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57520 [Waiting for operation #35] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1097 (was 1023) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=558 (was 580), ProcessCount=11 (was 11), AvailableMemoryMB=10867 (was 10874) 2024-11-24T04:49:56,805 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1097 is superior to 1024 2024-11-24T04:49:56,817 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=407, OpenFileDescriptor=1097, MaxFileDescriptor=1048576, SystemLoadAverage=558, ProcessCount=11, AvailableMemoryMB=10866 2024-11-24T04:49:56,817 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1097 is superior to 1024 2024-11-24T04:49:56,832 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:56,834 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:49:56,835 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:49:56,837 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-20008658, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-20008658, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:56,850 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-20008658/hregion-20008658.1732423796837, exclude list is [], retry=0 2024-11-24T04:49:56,854 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:56,854 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:56,854 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:56,857 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-20008658/hregion-20008658.1732423796837 2024-11-24T04:49:56,857 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:49:56,857 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 44a012eb1a94695c4627114a25ebd048, NAME => 'testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:49:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741902_1080 (size=68) 2024-11-24T04:49:56,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741902_1080 (size=68) 2024-11-24T04:49:56,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741902_1080 (size=68) 2024-11-24T04:49:56,874 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,876 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,878 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName a 2024-11-24T04:49:56,878 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,879 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,879 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,881 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName b 2024-11-24T04:49:56,881 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,881 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,882 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,883 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName c 2024-11-24T04:49:56,883 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,884 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,884 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,885 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,886 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,887 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,887 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,888 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:56,889 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,893 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:49:56,893 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 44a012eb1a94695c4627114a25ebd048; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59588695, jitterRate=-0.11205925047397614}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:56,894 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 44a012eb1a94695c4627114a25ebd048: Writing region info on filesystem at 1732423796874Initializing all the Stores at 1732423796875 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796875Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796876 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796876Cleaning up temporary data from old regions at 1732423796887 (+11 ms)Region opened successfully at 1732423796894 (+7 ms) 2024-11-24T04:49:56,894 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 44a012eb1a94695c4627114a25ebd048, disabling compactions & flushes 2024-11-24T04:49:56,895 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:49:56,895 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:49:56,895 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 
after waiting 0 ms 2024-11-24T04:49:56,895 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:49:56,895 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:49:56,895 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 44a012eb1a94695c4627114a25ebd048: Waiting for close lock at 1732423796894Disabling compacts and flushes for region at 1732423796894Disabling writes for close at 1732423796895 (+1 ms)Writing region close event to WAL at 1732423796895Closed at 1732423796895 2024-11-24T04:49:56,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741901_1079 (size=95) 2024-11-24T04:49:56,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741901_1079 (size=95) 2024-11-24T04:49:56,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741901_1079 (size=95) 2024-11-24T04:49:56,901 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:49:56,901 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-20008658:(num 1732423796837) 2024-11-24T04:49:56,901 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:49:56,903 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:49:56,915 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, exclude list is [], retry=0 2024-11-24T04:49:56,918 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:49:56,919 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:49:56,919 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:49:56,923 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 2024-11-24T04:49:56,924 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:49:56,982 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 44a012eb1a94695c4627114a25ebd048, NAME => 'testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:49:56,984 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,985 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:49:56,985 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,985 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,987 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,988 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName a 2024-11-24T04:49:56,988 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,989 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,989 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,990 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName b 2024-11-24T04:49:56,990 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,991 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,991 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,992 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName c 2024-11-24T04:49:56,992 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:49:56,993 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:49:56,993 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,994 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,995 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,996 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,996 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,997 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:49:56,998 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:56,999 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 44a012eb1a94695c4627114a25ebd048; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75121708, jitterRate=0.11940068006515503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:49:56,999 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:49:57,000 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 44a012eb1a94695c4627114a25ebd048: Running coprocessor pre-open hook at 1732423796985Writing region info on filesystem at 1732423796985Initializing all the Stores at 1732423796986 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796986Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796987 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423796987Cleaning up temporary data from old regions at 1732423796996 (+9 ms)Running coprocessor post-open hooks at 1732423796999 (+3 ms)Region opened successfully at 1732423797000 (+1 ms) 2024-11-24T04:49:57,014 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 44a012eb1a94695c4627114a25ebd048 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-11-24T04:49:57,015 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:58,016 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
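The repeated WARN entries above and below come from the test's own fault injection: AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot throws "Simulated exception by tests", and HStore.flushCache catches it and retries, logging "retrying num=N" at roughly one-second intervals (compare the timestamps 04:49:57, 04:49:58, ...). The snippet below is a minimal, self-contained Java sketch of that fail-and-retry pattern, not HBase code: the class and method names (SimulatedFlushRetry, flushSnapshot, flushWithRetries) and the retry budget and pause are illustrative assumptions chosen to mirror the cadence visible in the log.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Illustrative sketch of the behaviour seen in the log: a store flush that
    // keeps failing with a simulated IOException and is retried about once per
    // second ("retrying num=0", "retrying num=1", ...). Names are hypothetical.
    public class SimulatedFlushRetry {

        // Stands in for the test switch that makes every flush fail on purpose.
        static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(true);

        // Stands in for the flusher: fails while the switch is on.
        static void flushSnapshot() throws IOException {
            if (throwExceptionWhenFlushing.get()) {
                throw new IOException("Simulated exception by tests");
            }
            System.out.println("flush succeeded");
        }

        // Stands in for the retry loop around the flush: retry a fixed number of
        // times with a pause between attempts, then rethrow the last failure.
        static void flushWithRetries(int maxRetries, long pauseMillis)
                throws IOException, InterruptedException {
            IOException lastFailure = null;
            for (int i = 0; i < maxRetries; i++) {
                try {
                    flushSnapshot();
                    return;
                } catch (IOException e) {
                    lastFailure = e;
                    System.out.println("Failed flushing store file, retrying num=" + i);
                    Thread.sleep(pauseMillis);
                }
            }
            throw lastFailure;
        }

        public static void main(String[] args) throws Exception {
            try {
                flushWithRetries(10, 1000L); // assumed retry budget and ~1 s pause
            } catch (IOException e) {
                System.out.println("giving up after retries: " + e.getMessage());
            }
        }
    }

Judging by the test name (testReplayEditsAfterAbortingFlush), the point of forcing the flush to fail appears to be to abort the flush and then verify that the edits are still recovered by WAL replay when the region is reopened.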
2024-11-24T04:49:58,895 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T04:49:59,018 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:49:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741835_1011 (size=1405) 2024-11-24T04:50:00,019 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:01,021 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
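The region-open lines earlier in this section report FlushLargeStoresPolicy{flushSizeLowerBound=44739242} together with the FlushLargeStoresPolicy(65) message that, since hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for the table, the per-family lower bound falls back to the region memstore flush size divided by the number of column families ("42.7 M" for the three families a, b and c). The short sketch below checks that arithmetic; it assumes the stock 128 MB hbase.hregion.memstore.flush.size, which is not printed in this log and is therefore an assumption.

    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size
            int columnFamilies = 3;                      // families a, b and c from the table descriptor
            long lowerBound = memstoreFlushSize / columnFamilies;
            // Prints "44739242 bytes ~= 42.67 MiB", matching flushSizeLowerBound=44739242
            // and the "(42.7 M)" in the FlushLargeStoresPolicy message earlier in the log.
            System.out.printf("%d bytes ~= %.2f MiB%n", lowerBound, lowerBound / 1048576.0);
        }
    }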
2024-11-24T04:50:02,022 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:03,023 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:04,024 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:05,026 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:05,139 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
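The DEBUG line from HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer above records a reflective probe for a private threadGroup field that newer Hadoop releases no longer expose, and downgrades the resulting NoSuchFieldException to a debug message (see HBASE-27595). What follows is only a minimal sketch of that probe-and-fall-back pattern; the class and method names (ThreadGroupFieldProbe, readThreadGroup, targetService) are hypothetical, and the real fixer's internals are not shown in this log.

import java.lang.reflect.Field;

/*
 * Minimal sketch, assuming the "NoSuchFieldException: threadGroup" above comes from a
 * reflective field lookup. All names here are hypothetical; the real logic lives in
 * HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer and is not reproduced from source.
 */
public class ThreadGroupFieldProbe {

  // Try to read a private 'threadGroup' field; treat a missing field as non-fatal.
  static ThreadGroup readThreadGroup(Object targetService) {
    try {
      Field f = targetService.getClass().getDeclaredField("threadGroup");
      f.setAccessible(true);
      return (ThreadGroup) f.get(targetService);
    } catch (NoSuchFieldException e) {
      // Newer Hadoop versions dropped the field; log it and carry on,
      // mirroring the DEBUG-level message in the log line above (HBASE-27595).
      System.out.println("NoSuchFieldException: threadGroup; "
          + "It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.");
      return null;
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }
}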
2024-11-24T04:50:06,027 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 44a012eb1a94695c4627114a25ebd048/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:06,028 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 44a012eb1a94695c4627114a25ebd048: 2024-11-24T04:50:06,029 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:50:06,043 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 44a012eb1a94695c4627114a25ebd048: 2024-11-24T04:50:06,043 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 2024-11-24T04:50:06,044 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 44a012eb1a94695c4627114a25ebd048, disabling compactions & flushes 2024-11-24T04:50:06,044 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:50:06,044 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:50:06,044 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. after waiting 0 ms 2024-11-24T04:50:06,044 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:50:06,044 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 2024-11-24T04:50:06,044 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 
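The WARN lines above, stamped 04:50:02 through 04:50:06 for retrying num=5 through num=9, show the store flush being retried roughly once per second against the test's simulated IOException until the region gives up and closes with 1190 bytes still in its memstore. Below is a rough plain-Java sketch of that retry cadence; the attempt count, the one-second sleep, and the attemptFlush hook are assumptions read off this log's pattern, not the actual HStore code path.

import java.io.IOException;

/*
 * Rough sketch of the flush-retry loop implied by the WARN lines above: a bounded
 * number of attempts about one second apart. attemptFlush stands in for the flusher
 * that AbstractTestWALReplay$CustomStoreFlusher forces to fail; it is a placeholder,
 * not the real HStore.flushCache.
 */
public class FlushRetrySketch {

  interface FlushAttempt {
    void run() throws IOException; // throws "Simulated exception by tests" in the test scenario
  }

  // Returns true if a retry eventually succeeds; false if every attempt fails,
  // after which the caller would close the region with unflushed memstore data,
  // as the ERROR "Memstore data size is 1190" line above shows.
  static boolean flushWithRetries(FlushAttempt attemptFlush, int maxRetries) throws InterruptedException {
    for (int retry = 0; retry <= maxRetries; retry++) {
      try {
        attemptFlush.run();
        return true;
      } catch (IOException e) {
        System.out.printf("Failed flushing store file, retrying num=%d %s%n", retry, e);
        if (retry < maxRetries) {
          Thread.sleep(1000L); // matches the ~1 s spacing of the WARN timestamps
        }
      }
    }
    return false;
  }
}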
2024-11-24T04:50:06,044 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 44a012eb1a94695c4627114a25ebd048: Waiting for close lock at 1732423806043Running coprocessor pre-close hooks at 1732423806043Disabling compacts and flushes for region at 1732423806044 (+1 ms)Disabling writes for close at 1732423806044Writing region close event to WAL at 1732423806044Running coprocessor post-close hooks at 1732423806044Closed at 1732423806044 2024-11-24T04:50:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741903_1081 (size=2691) 2024-11-24T04:50:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741903_1081 (size=2691) 2024-11-24T04:50:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741903_1081 (size=2691) 2024-11-24T04:50:06,063 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, size=2.6 K (2691bytes) 2024-11-24T04:50:06,063 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 2024-11-24T04:50:06,064 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 after 1ms 2024-11-24T04:50:06,066 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:06,066 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 took 3ms 2024-11-24T04:50:06,069 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 so closing down 2024-11-24T04:50:06,069 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:50:06,070 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1732423796903.temp 2024-11-24T04:50:06,071 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp 2024-11-24T04:50:06,071 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:50:06,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741904_1082 (size=2094) 2024-11-24T04:50:06,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741904_1082 (size=2094) 2024-11-24T04:50:06,082 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741904_1082 (size=2094) 2024-11-24T04:50:06,083 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-11-24T04:50:06,084 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 2024-11-24T04:50:06,084 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-11-24T04:50:06,084 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, journal: Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, size=2.6 K (2691bytes) at 1732423806063Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 so closing down at 1732423806069 (+6 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp at 1732423806071 (+2 ms)3 split writer threads finished at 1732423806071Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1732423806083 (+12 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000004-wal.1732423796903.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 at 1732423806084 (+1 ms)Processed 23 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1732423806084 2024-11-24T04:50:06,086 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423796903 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423796903 2024-11-24T04:50:06,087 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 2024-11-24T04:50:06,087 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 
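The split journal above writes recovered edits to a temp file named after the first replayable sequence id and the source WAL (0000000000000000004-wal.1732423796903.temp) and then renames it to the highest sequence id it contains (0000000000000000026). The sketch below reproduces only that naming convention; the 19-digit zero padding is inferred from the paths in this log, and the helper names are mine rather than HBase's.

/*
 * Sketch of the recovered-edits naming seen in the split journal above. The
 * zero-padded width is inferred from the logged paths; buildTempName and
 * buildFinalName are hypothetical helpers, not HBase API.
 */
public class RecoveredEditsNames {

  static String pad(long seqId) {
    return String.format("%019d", seqId); // 19-digit, zero-padded sequence id
  }

  static String buildTempName(long firstSeqId, String walName) {
    return pad(firstSeqId) + "-" + walName + ".temp"; // e.g. 0000000000000000004-wal.1732423796903.temp
  }

  static String buildFinalName(long maxSeqId) {
    return pad(maxSeqId); // e.g. 0000000000000000026
  }

  public static void main(String[] args) {
    System.out.println(buildTempName(4, "wal.1732423796903"));
    System.out.println(buildFinalName(26));
  }
}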
2024-11-24T04:50:06,089 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:06,106 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423806090, exclude list is [], retry=0 2024-11-24T04:50:06,109 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:06,109 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:06,110 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:06,111 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423806090 2024-11-24T04:50:06,112 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:50:06,112 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 44a012eb1a94695c4627114a25ebd048, NAME => 'testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:06,113 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,113 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:06,113 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,113 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,116 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,117 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName a 2024-11-24T04:50:06,117 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,117 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,117 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,118 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName b 2024-11-24T04:50:06,118 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,119 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,119 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,119 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a012eb1a94695c4627114a25ebd048 columnFamilyName c 2024-11-24T04:50:06,119 DEBUG [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,120 INFO [StoreOpener-44a012eb1a94695c4627114a25ebd048-1 {}] regionserver.HStore(327): Store=44a012eb1a94695c4627114a25ebd048/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,120 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,120 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,122 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,122 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 2024-11-24T04:50:06,125 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:06,127 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 2024-11-24T04:50:06,127 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 44a012eb1a94695c4627114a25ebd048 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-11-24T04:50:06,142 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/a/4c7184e1a6bc436d8fb821f2a37110d2 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1732423806036/Put/seqid=0 2024-11-24T04:50:06,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741906_1084 (size=5523) 2024-11-24T04:50:06,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741906_1084 (size=5523) 2024-11-24T04:50:06,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741906_1084 (size=5523) 2024-11-24T04:50:06,150 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 
(bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/a/4c7184e1a6bc436d8fb821f2a37110d2 2024-11-24T04:50:06,177 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/b/ab5db0616e4040fea52882f6cf120529 is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1732423806029/Put/seqid=0 2024-11-24T04:50:06,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741907_1085 (size=5524) 2024-11-24T04:50:06,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741907_1085 (size=5524) 2024-11-24T04:50:06,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741907_1085 (size=5524) 2024-11-24T04:50:06,184 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/b/ab5db0616e4040fea52882f6cf120529 2024-11-24T04:50:06,211 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/c/ad69631b743c426583866b66b7ee42c0 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1732423806033/Put/seqid=0 2024-11-24T04:50:06,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741908_1086 (size=5457) 2024-11-24T04:50:06,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741908_1086 (size=5457) 2024-11-24T04:50:06,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741908_1086 (size=5457) 2024-11-24T04:50:06,218 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/c/ad69631b743c426583866b66b7ee42c0 2024-11-24T04:50:06,231 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/a/4c7184e1a6bc436d8fb821f2a37110d2 as hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/a/4c7184e1a6bc436d8fb821f2a37110d2 2024-11-24T04:50:06,238 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/a/4c7184e1a6bc436d8fb821f2a37110d2, entries=7, sequenceid=26, filesize=5.4 K 2024-11-24T04:50:06,239 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/b/ab5db0616e4040fea52882f6cf120529 as 
hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/b/ab5db0616e4040fea52882f6cf120529 2024-11-24T04:50:06,247 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/b/ab5db0616e4040fea52882f6cf120529, entries=7, sequenceid=26, filesize=5.4 K 2024-11-24T04:50:06,248 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/.tmp/c/ad69631b743c426583866b66b7ee42c0 as hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/c/ad69631b743c426583866b66b7ee42c0 2024-11-24T04:50:06,256 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/c/ad69631b743c426583866b66b7ee42c0, entries=6, sequenceid=26, filesize=5.3 K 2024-11-24T04:50:06,257 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 44a012eb1a94695c4627114a25ebd048 in 130ms, sequenceid=26, compaction requested=false; wal=null 2024-11-24T04:50:06,258 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/0000000000000000026 2024-11-24T04:50:06,259 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,260 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,261 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
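The replay above ends with a write-then-commit pattern: each family's flushed file is produced under the region's .tmp directory and then moved into the family directory (a/, b/, c/) before the recovered.edits file is deleted. The sketch below shows that commit step with the Hadoop FileSystem API; the example paths are copied from the log, but commitFlushedFile is a simplified stand-in for HRegionFileSystem's commit logic, not the real implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/*
 * Simplified sketch of the ".tmp -> column family" commit visible in the
 * "Committing ... as ..." lines above. Only the rename idea and the example
 * paths come from this log; the method is a hypothetical stand-in.
 */
public class CommitFlushedFileSketch {

  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) { // a single rename keeps the move cheap on HDFS
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path region = new Path("/hbase/data/default/testReplayEditsAfterAbortingFlush/"
        + "44a012eb1a94695c4627114a25ebd048");
    // Mirrors the first commit above: .tmp/a/4c7184e1a6bc436d8fb821f2a37110d2 -> a/
    commitFlushedFile(fs,
        new Path(region, ".tmp/a/4c7184e1a6bc436d8fb821f2a37110d2"),
        new Path(region, "a"));
  }
}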
2024-11-24T04:50:06,262 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,265 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsAfterAbortingFlush/44a012eb1a94695c4627114a25ebd048/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-11-24T04:50:06,266 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 44a012eb1a94695c4627114a25ebd048; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61461258, jitterRate=-0.08415588736534119}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:06,267 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 44a012eb1a94695c4627114a25ebd048 2024-11-24T04:50:06,268 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 44a012eb1a94695c4627114a25ebd048: Running coprocessor pre-open hook at 1732423806113Writing region info on filesystem at 1732423806113Initializing all the Stores at 1732423806114 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806114Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806115 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806115Obtaining lock to block concurrent updates at 1732423806127 (+12 ms)Preparing flush snapshotting stores in 44a012eb1a94695c4627114a25ebd048 at 1732423806127Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1732423806127Flushing stores of testReplayEditsAfterAbortingFlush,,1732423796832.44a012eb1a94695c4627114a25ebd048. 
at 1732423806127Flushing 44a012eb1a94695c4627114a25ebd048/a: creating writer at 1732423806128 (+1 ms)Flushing 44a012eb1a94695c4627114a25ebd048/a: appending metadata at 1732423806142 (+14 ms)Flushing 44a012eb1a94695c4627114a25ebd048/a: closing flushed file at 1732423806142Flushing 44a012eb1a94695c4627114a25ebd048/b: creating writer at 1732423806156 (+14 ms)Flushing 44a012eb1a94695c4627114a25ebd048/b: appending metadata at 1732423806176 (+20 ms)Flushing 44a012eb1a94695c4627114a25ebd048/b: closing flushed file at 1732423806176Flushing 44a012eb1a94695c4627114a25ebd048/c: creating writer at 1732423806190 (+14 ms)Flushing 44a012eb1a94695c4627114a25ebd048/c: appending metadata at 1732423806210 (+20 ms)Flushing 44a012eb1a94695c4627114a25ebd048/c: closing flushed file at 1732423806210Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31af6fbc: reopening flushed file at 1732423806229 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@302f7843: reopening flushed file at 1732423806238 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@327aad36: reopening flushed file at 1732423806247 (+9 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 44a012eb1a94695c4627114a25ebd048 in 130ms, sequenceid=26, compaction requested=false; wal=null at 1732423806257 (+10 ms)Cleaning up temporary data from old regions at 1732423806260 (+3 ms)Running coprocessor post-open hooks at 1732423806267 (+7 ms)Region opened successfully at 1732423806268 (+1 ms) 2024-11-24T04:50:06,292 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=411 (was 407) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56252 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57828 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56242 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57926 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57836 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57906 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1159 (was 1097) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=529 (was 558), ProcessCount=11 (was 11), AvailableMemoryMB=10836 (was 10866) 2024-11-24T04:50:06,292 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1159 is superior to 1024 2024-11-24T04:50:06,307 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=411, OpenFileDescriptor=1159, MaxFileDescriptor=1048576, SystemLoadAverage=529, ProcessCount=11, AvailableMemoryMB=10834 2024-11-24T04:50:06,307 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1159 is superior to 1024 2024-11-24T04:50:06,329 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:06,332 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:06,333 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:50:06,337 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-56974984, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-56974984, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:06,356 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-56974984/hregion-56974984.1732423806337, exclude list is [], retry=0 2024-11-24T04:50:06,360 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:06,365 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:06,365 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured 
configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:06,368 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-56974984/hregion-56974984.1732423806337 2024-11-24T04:50:06,372 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:50:06,373 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 04049b5260d8c99adafb3c8e77fd0508, NAME => 'testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:50:06,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741910_1088 (size=61) 2024-11-24T04:50:06,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741910_1088 (size=61) 2024-11-24T04:50:06,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741910_1088 (size=61) 2024-11-24T04:50:06,397 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:06,398 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,401 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04049b5260d8c99adafb3c8e77fd0508 columnFamilyName a 2024-11-24T04:50:06,401 DEBUG [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,402 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(327): Store=04049b5260d8c99adafb3c8e77fd0508/a, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,402 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,403 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,404 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,404 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,404 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,407 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,410 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:50:06,410 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 04049b5260d8c99adafb3c8e77fd0508; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60505939, jitterRate=-0.09839124977588654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:50:06,411 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 04049b5260d8c99adafb3c8e77fd0508: Writing region info on filesystem at 1732423806397Initializing all the Stores at 1732423806398 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806398Cleaning up temporary data from old regions at 1732423806404 (+6 ms)Region opened successfully at 1732423806411 (+7 ms) 2024-11-24T04:50:06,411 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 04049b5260d8c99adafb3c8e77fd0508, disabling compactions & flushes 2024-11-24T04:50:06,411 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,411 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,411 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. after waiting 0 ms 2024-11-24T04:50:06,411 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,419 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 
2024-11-24T04:50:06,419 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 04049b5260d8c99adafb3c8e77fd0508: Waiting for close lock at 1732423806411Disabling compacts and flushes for region at 1732423806411Disabling writes for close at 1732423806411Writing region close event to WAL at 1732423806419 (+8 ms)Closed at 1732423806419 2024-11-24T04:50:06,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741909_1087 (size=95) 2024-11-24T04:50:06,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741909_1087 (size=95) 2024-11-24T04:50:06,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741909_1087 (size=95) 2024-11-24T04:50:06,425 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:50:06,425 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-56974984:(num 1732423806337) 2024-11-24T04:50:06,425 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:06,427 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:06,434 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-11-24T04:50:06,434 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-11-24T04:50:06,435 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-11-24T04:50:06,435 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-11-24T04:50:06,441 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, exclude list is [], retry=0 2024-11-24T04:50:06,444 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:06,445 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:06,445 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:06,447 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 2024-11-24T04:50:06,448 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:50:06,448 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 04049b5260d8c99adafb3c8e77fd0508, NAME => 'testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:06,448 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:06,448 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,448 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,450 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,450 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04049b5260d8c99adafb3c8e77fd0508 columnFamilyName a 2024-11-24T04:50:06,451 DEBUG [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,451 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(327): Store=04049b5260d8c99adafb3c8e77fd0508/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,451 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,452 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,453 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,453 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,453 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,455 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,456 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 04049b5260d8c99adafb3c8e77fd0508; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65733406, jitterRate=-0.02049592137336731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:50:06,456 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 04049b5260d8c99adafb3c8e77fd0508: Writing region info on filesystem at 1732423806448Initializing all the Stores at 1732423806449 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806449Cleaning up temporary data from old regions at 1732423806453 (+4 ms)Region opened successfully at 1732423806456 (+3 ms) 2024-11-24T04:50:06,468 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 04049b5260d8c99adafb3c8e77fd0508, disabling compactions & flushes 2024-11-24T04:50:06,468 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,468 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,468 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. after waiting 0 ms 2024-11-24T04:50:06,468 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,468 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 2024-11-24T04:50:06,468 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 
2024-11-24T04:50:06,468 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 04049b5260d8c99adafb3c8e77fd0508: Waiting for close lock at 1732423806468Disabling compacts and flushes for region at 1732423806468Disabling writes for close at 1732423806468Writing region close event to WAL at 1732423806468Closed at 1732423806468 2024-11-24T04:50:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741911_1089 (size=1050) 2024-11-24T04:50:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741911_1089 (size=1050) 2024-11-24T04:50:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741911_1089 (size=1050) 2024-11-24T04:50:06,486 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, size=1.0 K (1050bytes) 2024-11-24T04:50:06,486 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 2024-11-24T04:50:06,486 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 after 0ms 2024-11-24T04:50:06,488 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:06,488 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 took 2ms 2024-11-24T04:50:06,490 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 so closing down 2024-11-24T04:50:06,490 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:50:06,491 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1732423806427.temp 2024-11-24T04:50:06,492 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp 2024-11-24T04:50:06,493 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:50:06,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741912_1090 (size=1050) 2024-11-24T04:50:06,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741912_1090 (size=1050) 2024-11-24T04:50:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741912_1090 (size=1050) 2024-11-24T04:50:06,499 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-11-24T04:50:06,501 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp to hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 2024-11-24T04:50:06,501 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-11-24T04:50:06,501 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, journal: Splitting hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, size=1.0 K (1050bytes) at 1732423806486Finishing writing output for hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 so closing down at 1732423806490 (+4 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp at 1732423806492 (+2 ms)3 split writer threads finished at 1732423806493 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1732423806499 (+6 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000003-wal.1732423806427.temp to hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 at 1732423806501 (+2 ms)Processed 10 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1732423806501 2024-11-24T04:50:06,503 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806427 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423806427 2024-11-24T04:50:06,504 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 2024-11-24T04:50:06,507 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-11-24T04:50:06,829 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:06,831 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:06,843 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806831, exclude list is [], retry=0 2024-11-24T04:50:06,845 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:06,846 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:06,846 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:06,847 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806831 2024-11-24T04:50:06,848 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:50:06,848 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 04049b5260d8c99adafb3c8e77fd0508, NAME => 'testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:06,848 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:06,848 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,848 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,852 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,854 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04049b5260d8c99adafb3c8e77fd0508 columnFamilyName a 2024-11-24T04:50:06,854 DEBUG [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,855 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(327): Store=04049b5260d8c99adafb3c8e77fd0508/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,855 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,856 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,857 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,858 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 2024-11-24T04:50:06,860 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:06,861 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 2024-11-24T04:50:06,861 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 04049b5260d8c99adafb3c8e77fd0508 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-11-24T04:50:06,878 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/.tmp/a/8f23e3494ffd450baba5a3eaefedbb7b is 79, key is testDatalossWhenInputError/a:x0/1732423806456/Put/seqid=0 2024-11-24T04:50:06,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741914_1092 (size=5808) 2024-11-24T04:50:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741914_1092 (size=5808) 2024-11-24T04:50:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741914_1092 (size=5808) 2024-11-24T04:50:06,886 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at 
sequenceid=12 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/.tmp/a/8f23e3494ffd450baba5a3eaefedbb7b 2024-11-24T04:50:06,896 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/.tmp/a/8f23e3494ffd450baba5a3eaefedbb7b as hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/a/8f23e3494ffd450baba5a3eaefedbb7b 2024-11-24T04:50:06,905 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/a/8f23e3494ffd450baba5a3eaefedbb7b, entries=10, sequenceid=12, filesize=5.7 K 2024-11-24T04:50:06,905 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 04049b5260d8c99adafb3c8e77fd0508 in 44ms, sequenceid=12, compaction requested=false; wal=null 2024-11-24T04:50:06,906 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/0000000000000000012 2024-11-24T04:50:06,907 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,907 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,910 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,912 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-11-24T04:50:06,913 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 04049b5260d8c99adafb3c8e77fd0508; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64959605, jitterRate=-0.03202645480632782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:50:06,914 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 04049b5260d8c99adafb3c8e77fd0508: Writing region info on filesystem at 1732423806848Initializing all the Stores at 1732423806852 (+4 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806852Obtaining lock to block concurrent updates at 1732423806861 (+9 ms)Preparing flush snapshotting stores in 04049b5260d8c99adafb3c8e77fd0508 at 1732423806861Finished memstore snapshotting testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1732423806861Flushing stores of testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508. 
at 1732423806861Flushing 04049b5260d8c99adafb3c8e77fd0508/a: creating writer at 1732423806861Flushing 04049b5260d8c99adafb3c8e77fd0508/a: appending metadata at 1732423806877 (+16 ms)Flushing 04049b5260d8c99adafb3c8e77fd0508/a: closing flushed file at 1732423806877Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b5ca36d: reopening flushed file at 1732423806894 (+17 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 04049b5260d8c99adafb3c8e77fd0508 in 44ms, sequenceid=12, compaction requested=false; wal=null at 1732423806905 (+11 ms)Cleaning up temporary data from old regions at 1732423806907 (+2 ms)Region opened successfully at 1732423806914 (+7 ms) 2024-11-24T04:50:06,917 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 04049b5260d8c99adafb3c8e77fd0508, NAME => 'testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:06,917 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1732423806330.04049b5260d8c99adafb3c8e77fd0508.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:06,917 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,917 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,918 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,919 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04049b5260d8c99adafb3c8e77fd0508 columnFamilyName a 2024-11-24T04:50:06,919 DEBUG [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:06,925 DEBUG [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/a/8f23e3494ffd450baba5a3eaefedbb7b 2024-11-24T04:50:06,925 INFO [StoreOpener-04049b5260d8c99adafb3c8e77fd0508-1 {}] regionserver.HStore(327): Store=04049b5260d8c99adafb3c8e77fd0508/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:06,925 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,926 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,927 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,927 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,927 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,929 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 04049b5260d8c99adafb3c8e77fd0508 2024-11-24T04:50:06,931 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testDatalossWhenInputError/04049b5260d8c99adafb3c8e77fd0508/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-11-24T04:50:06,932 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 04049b5260d8c99adafb3c8e77fd0508; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63598517, jitterRate=-0.05230824649333954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T04:50:06,932 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 04049b5260d8c99adafb3c8e77fd0508: Writing region info on filesystem at 1732423806917Initializing all the Stores at 1732423806918 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423806918Cleaning up temporary data from old regions at 1732423806927 (+9 ms)Region opened successfully at 1732423806932 (+5 ms) 2024-11-24T04:50:06,950 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=421 (was 411) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57968 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57906 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56252 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57836 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56334 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57906 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1240 (was 1159) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 529), ProcessCount=11 (was 11), AvailableMemoryMB=10823 (was 10834) 2024-11-24T04:50:06,950 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1240 is superior to 1024 2024-11-24T04:50:06,964 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=421, OpenFileDescriptor=1240, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=11, AvailableMemoryMB=10822 2024-11-24T04:50:06,964 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1240 is superior to 1024 2024-11-24T04:50:06,980 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:06,983 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:06,983 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:50:06,986 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-67711307, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-67711307, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:07,006 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-67711307/hregion-67711307.1732423806987, exclude list is [], retry=0 2024-11-24T04:50:07,009 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:07,009 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:07,010 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:07,015 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-67711307/hregion-67711307.1732423806987 2024-11-24T04:50:07,016 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:50:07,016 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 76749f1db87289a38b0c7d656ed5b68e, NAME => 'testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:50:07,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741916_1094 (size=63) 2024-11-24T04:50:07,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741916_1094 (size=63) 2024-11-24T04:50:07,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741916_1094 (size=63) 2024-11-24T04:50:07,029 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:07,030 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,031 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName a 2024-11-24T04:50:07,031 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,032 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,032 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,033 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName b 2024-11-24T04:50:07,033 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,033 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,034 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,035 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName c 2024-11-24T04:50:07,035 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,035 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,036 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,036 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,037 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,038 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,038 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,038 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:07,039 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,041 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:50:07,042 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 76749f1db87289a38b0c7d656ed5b68e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69557629, jitterRate=0.036489441990852356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:07,042 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 76749f1db87289a38b0c7d656ed5b68e: Writing region info on filesystem at 1732423807029Initializing all the Stores at 1732423807030 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807030Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807030Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807030Cleaning up temporary data from old regions at 1732423807038 (+8 ms)Region opened successfully at 1732423807042 (+4 ms) 2024-11-24T04:50:07,042 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 76749f1db87289a38b0c7d656ed5b68e, disabling compactions & flushes 2024-11-24T04:50:07,042 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:07,042 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:07,042 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 
after waiting 0 ms 2024-11-24T04:50:07,042 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:07,043 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:07,043 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 76749f1db87289a38b0c7d656ed5b68e: Waiting for close lock at 1732423807042Disabling compacts and flushes for region at 1732423807042Disabling writes for close at 1732423807042Writing region close event to WAL at 1732423807043 (+1 ms)Closed at 1732423807043 2024-11-24T04:50:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741915_1093 (size=95) 2024-11-24T04:50:07,045 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-67711307/hregion-67711307.1732423806987 not finished, retry = 0 2024-11-24T04:50:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741915_1093 (size=95) 2024-11-24T04:50:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741915_1093 (size=95) 2024-11-24T04:50:07,151 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:50:07,151 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-67711307:(num 1732423806987) 2024-11-24T04:50:07,152 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:07,156 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:07,169 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, exclude list is [], retry=0 2024-11-24T04:50:07,172 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:07,172 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:07,172 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:07,174 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 2024-11-24T04:50:07,174 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:50:07,174 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 76749f1db87289a38b0c7d656ed5b68e, NAME => 'testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:07,174 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:07,174 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,174 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,176 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,177 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName a 2024-11-24T04:50:07,177 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,177 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,177 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,178 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName b 2024-11-24T04:50:07,178 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,179 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,179 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,180 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName c 2024-11-24T04:50:07,180 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:07,180 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:07,180 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,181 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,183 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,184 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,184 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,184 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:07,186 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:07,187 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 76749f1db87289a38b0c7d656ed5b68e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72020087, jitterRate=0.07318292558193207}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:07,188 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 76749f1db87289a38b0c7d656ed5b68e: Writing region info on filesystem at 1732423807175Initializing all the Stores at 1732423807176 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807176Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807176Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423807176Cleaning up temporary data from old regions at 1732423807184 (+8 ms)Region opened successfully at 1732423807188 (+4 ms) 2024-11-24T04:50:07,192 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1732423807192/Put/seqid=0 2024-11-24T04:50:07,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741918_1096 (size=4875) 2024-11-24T04:50:07,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741918_1096 (size=4875) 2024-11-24T04:50:07,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741918_1096 (size=4875) 2024-11-24T04:50:07,204 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1732423807204/Put/seqid=0 2024-11-24T04:50:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741919_1097 (size=4875) 2024-11-24T04:50:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741919_1097 (size=4875) 2024-11-24T04:50:07,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741919_1097 (size=4875) 
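The FlushLargeStoresPolicy and split-policy values logged above are plain arithmetic. Below is a minimal stand-alone sketch (ordinary Java, not HBase code) that reproduces the logged numbers, assuming the default 128 MB memstore flush size and the 64 MB split-size base implied by the figures above.

// Illustrative arithmetic only; not HBase code. Assumes the default 128 MB
// memstore flush size and a 64 MB split-policy base size.
public class WalReplayLogMath {
    public static void main(String[] args) {
        // FlushLargeStoresPolicy: no per-family lower bound is configured, so the
        // log falls back to memstore-flush-size / number-of-families.
        long memstoreFlushSize = 128L * 1024 * 1024;   // 134217728
        int families = 3;                              // 'a', 'b', 'c'
        long lowerBound = memstoreFlushSize / families;
        System.out.println(lowerBound);                            // 44739242, the flushSizeLowerBound above
        System.out.printf("%.1f M%n", lowerBound / (1024.0 * 1024.0)); // 42.7 M, as printed in the log

        // ConstantSizeRegionSplitPolicy: the desired max file size is the base size
        // plus a random jitter; re-applying the logged jitterRate to a 64 MB base
        // reproduces the logged desiredMaxFileSize (up to floating-point rounding).
        long baseSize = 64L * 1024 * 1024;             // 67108864
        double jitterRate = 0.07318292558193207;       // taken from the log line above
        long desiredMaxFileSize = (long) (baseSize * (1.0 + jitterRate));
        System.out.println(desiredMaxFileSize);        // 72020087 as logged
    }
}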
2024-11-24T04:50:07,216 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1732423807216/Put/seqid=0 2024-11-24T04:50:07,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741920_1098 (size=4875) 2024-11-24T04:50:07,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741920_1098 (size=4875) 2024-11-24T04:50:07,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741920_1098 (size=4875) 2024-11-24T04:50:07,223 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,226 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-11-24T04:50:07,226 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-24T04:50:07,227 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,230 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-11-24T04:50:07,230 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-24T04:50:07,230 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,233 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-11-24T04:50:07,233 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-24T04:50:07,233 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 76749f1db87289a38b0c7d656ed5b68e 3/3 column families, dataSize=51 B heapSize=896 B 2024-11-24T04:50:07,247 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp/a/ada700b88aef4f0a9f8178c5f0769210 is 55, key is testCompactedBulkLoadedFiles/a:a/1732423807188/Put/seqid=0 2024-11-24T04:50:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741921_1099 (size=5107) 2024-11-24T04:50:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741921_1099 (size=5107) 2024-11-24T04:50:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741921_1099 (size=5107) 2024-11-24T04:50:07,254 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp/a/ada700b88aef4f0a9f8178c5f0769210 2024-11-24T04:50:07,260 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp/a/ada700b88aef4f0a9f8178c5f0769210 as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210 2024-11-24T04:50:07,266 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210, entries=1, sequenceid=4, filesize=5.0 K 2024-11-24T04:50:07,267 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 76749f1db87289a38b0c7d656ed5b68e in 34ms, sequenceid=4, compaction requested=false 2024-11-24T04:50:07,267 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 76749f1db87289a38b0c7d656ed5b68e: 2024-11-24T04:50:07,268 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ 2024-11-24T04:50:07,269 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ 2024-11-24T04:50:07,270 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ 2024-11-24T04:50:07,271 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile0 into 76749f1db87289a38b0c7d656ed5b68e/a as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ - updating store file list. 2024-11-24T04:50:07,275 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-24T04:50:07,275 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ into 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,275 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile0 into 76749f1db87289a38b0c7d656ed5b68e/a (new location: hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_) 2024-11-24T04:50:07,277 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile1 into 76749f1db87289a38b0c7d656ed5b68e/a as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ - updating store file list. 
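The committed names above follow the <hex-name>_SeqId_<n>_ pattern: a bulk-loaded HFile is renamed so that it carries the sequence id it was assigned at load time (seqid=4 here, matching the preceding flush). The following is a rough sketch of that naming convention for illustration only, not the HStore implementation.

import java.util.UUID;

// Illustrative sketch of the bulk-load file-name pattern visible above:
// <random hex name>_SeqId_<n>_ . Shows how a sequence id can be carried in the
// file name and recovered when the store re-opens the file.
public class BulkLoadNameSketch {
    static String bulkLoadName(long seqId) {
        String base = UUID.randomUUID().toString().replace("-", "");
        return base + "_SeqId_" + seqId + "_";
    }

    static long parseSeqId(String fileName) {
        int idx = fileName.indexOf("_SeqId_");
        if (idx < 0) {
            return -1; // not a bulk-loaded file
        }
        String tail = fileName.substring(idx + "_SeqId_".length());
        return Long.parseLong(tail.substring(0, tail.length() - 1)); // drop trailing '_'
    }

    public static void main(String[] args) {
        System.out.println(parseSeqId("3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_")); // 4
        System.out.println(parseSeqId(bulkLoadName(4)));                             // 4
    }
}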
2024-11-24T04:50:07,281 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for ccedafbd9bc34131b52ea449ef191902_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-24T04:50:07,281 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ into 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,281 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile1 into 76749f1db87289a38b0c7d656ed5b68e/a (new location: hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_) 2024-11-24T04:50:07,283 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile2 into 76749f1db87289a38b0c7d656ed5b68e/a as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ - updating store file list. 2024-11-24T04:50:07,287 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9bc0b99a5479462bb04211600f090081_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-24T04:50:07,287 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ into 76749f1db87289a38b0c7d656ed5b68e/a 2024-11-24T04:50:07,288 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38973/hbase/testCompactedBulkLoadedFiles/hfile2 into 76749f1db87289a38b0c7d656ed5b68e/a (new location: hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_) 2024-11-24T04:50:07,294 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-24T04:50:07,294 DEBUG [Time-limited test {}] regionserver.HStore(1541): 76749f1db87289a38b0c7d656ed5b68e/a is initiating major compaction (all files) 2024-11-24T04:50:07,295 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 76749f1db87289a38b0c7d656ed5b68e/a in testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 
2024-11-24T04:50:07,295 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_] into tmpdir=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp, totalSize=19.3 K 2024-11-24T04:50:07,295 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ada700b88aef4f0a9f8178c5f0769210, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1732423807188 2024-11-24T04:50:07,296 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-24T04:50:07,296 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ccedafbd9bc34131b52ea449ef191902_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-24T04:50:07,297 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9bc0b99a5479462bb04211600f090081_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-24T04:50:07,312 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp/a/6b3bf7676e9c467aaab5b8950f452acc is 55, key is testCompactedBulkLoadedFiles/a:a/1732423807188/Put/seqid=0 2024-11-24T04:50:07,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741922_1100 (size=6154) 2024-11-24T04:50:07,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741922_1100 (size=6154) 2024-11-24T04:50:07,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741922_1100 (size=6154) 2024-11-24T04:50:07,326 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/.tmp/a/6b3bf7676e9c467aaab5b8950f452acc as hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/6b3bf7676e9c467aaab5b8950f452acc 2024-11-24T04:50:07,333 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 76749f1db87289a38b0c7d656ed5b68e/a of 76749f1db87289a38b0c7d656ed5b68e into 6b3bf7676e9c467aaab5b8950f452acc(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
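The compaction summary above can be cross-checked against the block sizes reported by the datanodes: the flushed file (5107 bytes) plus the three bulk-loaded HFiles (4875 bytes each) add up to the "totalSize=19.3 K" of input, and the single 6154-byte output is the "6.0 K" result. A small arithmetic check, for illustration only:

// Cross-check of the compaction sizes logged above; illustrative arithmetic only.
public class CompactionSizeCheck {
    static String human(long bytes) {
        return String.format("%.1f K", bytes / 1024.0);
    }

    public static void main(String[] args) {
        long flushed = 5107;                       // ada700b88aef4f0a9f8178c5f0769210
        long[] bulkLoaded = {4875, 4875, 4875};    // hfile0..hfile2 after bulk load
        long total = flushed;
        for (long size : bulkLoaded) {
            total += size;
        }
        System.out.println(human(total));          // 19.3 K, as in the compaction log line
        System.out.println(human(6154));           // 6.0 K, the compacted output file
    }
}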
2024-11-24T04:50:07,333 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 76749f1db87289a38b0c7d656ed5b68e:
2024-11-24T04:50:07,333 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking
2024-11-24T04:50:07,333 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking
2024-11-24T04:50:07,366 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, size=0 (0bytes)
2024-11-24T04:50:07,367 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 might be still open, length is 0
2024-11-24T04:50:07,367 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156
2024-11-24T04:50:07,367 WARN [IPC Server handler 3 on default port 38973 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095
2024-11-24T04:50:07,368 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 after 1ms
2024-11-24T04:50:08,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56360 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56360 dst: /127.0.0.1:44795
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44795 remote=/127.0.0.1:56360]. Total timeout mills is 60000, 59254 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T04:50:08,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:57940 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:44605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57940 dst: /127.0.0.1:44605
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T04:50:08,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:58000 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:36429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58000 dst: /127.0.0.1:36429
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T04:50:08,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741917_1101 (size=1173)
2024-11-24T04:50:08,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741917_1101 (size=1173)
2024-11-24T04:50:11,369 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 after 4002ms
2024-11-24T04:50:11,377 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-24T04:50:11,378 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 took 4011ms
2024-11-24T04:50:11,381 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156; continuing.
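The reader initialization above reports hasValueCompression=true with valueCompressionType=GZ, which is the feature this test class (TestAsyncWALReplayValueCompression) exercises: WAL cell values are stored compressed. The real WAL codec has its own framing and dictionary handling, so the following is only a rough stand-alone illustration of a GZ round trip over a single value.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

// Stand-alone GZ round trip for a single value; illustrates the
// "valueCompressionType=GZ" idea only, not the HBase WAL codec itself.
public class WalValueGzSketch {
    static byte[] compress(byte[] value) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
            gz.write(value);
        }
        return bos.toByteArray();
    }

    static byte[] decompress(byte[] compressed) throws IOException {
        try (GZIPInputStream gz = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
            return gz.readAllBytes();
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] value = "testCompactedBulkLoadedFiles value payload".getBytes(StandardCharsets.UTF_8);
        byte[] roundTrip = decompress(compress(value));
        // Prints true: the value survives the compress/decompress cycle.
        System.out.println(new String(roundTrip, StandardCharsets.UTF_8)
            .equals("testCompactedBulkLoadedFiles value payload"));
    }
}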
2024-11-24T04:50:11,381 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 so closing down 2024-11-24T04:50:11,381 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:50:11,383 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1732423807156.temp 2024-11-24T04:50:11,384 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp 2024-11-24T04:50:11,384 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:50:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741923_1102 (size=548) 2024-11-24T04:50:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741923_1102 (size=548) 2024-11-24T04:50:11,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741923_1102 (size=548) 2024-11-24T04:50:11,392 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-24T04:50:11,393 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp to hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 2024-11-24T04:50:11,394 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, size=0, length=0, corrupted=false, cancelled=false 2024-11-24T04:50:11,394 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, journal: Splitting hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, size=0 (0bytes) at 1732423807367Finishing writing output for hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 so closing down at 1732423811381 (+4014 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp at 1732423811384 (+3 ms)3 split writer threads finished at 1732423811384Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp (wrote 2 edits, skipped 0 
edits in 0 ms) at 1732423811392 (+8 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000003-wal.1732423807156.temp to hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 at 1732423811393 (+1 ms)Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156, size=0, length=0, corrupted=false, cancelled=false at 1732423811394 (+1 ms) 2024-11-24T04:50:11,395 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423807156 2024-11-24T04:50:11,396 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 2024-11-24T04:50:11,396 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:11,398 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:11,410 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423811398, exclude list is [], retry=0 2024-11-24T04:50:11,412 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:11,412 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:11,412 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:11,414 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423811398 2024-11-24T04:50:11,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:50:11,414 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 76749f1db87289a38b0c7d656ed5b68e, NAME => 'testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:11,414 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:11,414 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,414 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,416 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,416 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName a 2024-11-24T04:50:11,417 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,422 DEBUG [StoreFileOpener-76749f1db87289a38b0c7d656ed5b68e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-24T04:50:11,423 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ 2024-11-24T04:50:11,426 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/6b3bf7676e9c467aaab5b8950f452acc 2024-11-24T04:50:11,429 DEBUG [StoreFileOpener-76749f1db87289a38b0c7d656ed5b68e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9bc0b99a5479462bb04211600f090081_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-24T04:50:11,429 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ 2024-11-24T04:50:11,433 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210 2024-11-24T04:50:11,437 DEBUG [StoreFileOpener-76749f1db87289a38b0c7d656ed5b68e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for ccedafbd9bc34131b52ea449ef191902_SeqId_4_: NONE, but ROW specified in column family 
configuration 2024-11-24T04:50:11,437 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ 2024-11-24T04:50:11,437 WARN [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@5fb28867 2024-11-24T04:50:11,437 WARN [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@5fb28867 2024-11-24T04:50:11,437 WARN [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@5fb28867 2024-11-24T04:50:11,437 WARN [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@5fb28867 2024-11-24T04:50:11,437 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_] to archive 2024-11-24T04:50:11,438 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
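[Editor's note] The entries just above and below record two filesystem-level moves: the WAL split renamed its recovered-edits temp file to a final file named after the highest replayed sequence id (0000000000000000008), and the region open then clears the compacted bulk-loaded store files by relocating them into the mirrored /hbase/archive/data tree. The Java sketch below reproduces only those path manipulations with the plain Hadoop FileSystem API; the class and method names are invented for illustration, the concrete paths are taken from the log, and this is not HBase's internal WALSplitter/HFileArchiver code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: mirrors the two moves visible in the log above.
// Helper names are invented; paths are the ones the test actually used.
public class RecoveredEditsLayoutSketch {

  // Recovered edits are written to "<firstSeqId>-<walName>.temp" and then renamed
  // to the max sequence id, zero-padded to 19 digits (0000000000000000008 above).
  static Path finalizeRecoveredEdits(FileSystem fs, Path tempFile, long maxSeqId)
      throws IOException {
    Path finalFile = new Path(tempFile.getParent(), String.format("%019d", maxSeqId));
    if (!fs.rename(tempFile, finalFile)) {
      throw new IOException("Rename failed: " + tempFile + " -> " + finalFile);
    }
    return finalFile;
  }

  // Mirrors the archiver's effect: /hbase/data/<ns>/<table>/<region>/<cf>/<file>
  // ends up under /hbase/archive/data/<ns>/<table>/<region>/<cf>/<file>.
  static void archiveCompactedFile(FileSystem fs, Path rootDir, Path storeFile)
      throws IOException {
    String dataPrefix = new Path(rootDir, "data").toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
    Path archived = new Path(new Path(rootDir, "archive/data"), relative);
    fs.mkdirs(archived.getParent());
    if (!fs.rename(storeFile, archived)) {
      throw new IOException("Archive move failed for " + storeFile);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at the test HDFS
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/hbase");        // hbase.rootdir in this run
    Path regionDir = new Path(rootDir,
        "data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e");
    finalizeRecoveredEdits(fs,
        new Path(regionDir, "recovered.edits/0000000000000000003-wal.1732423807156.temp"), 8L);
    archiveCompactedFile(fs, rootDir,
        new Path(regionDir, "a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_"));
    fs.close();
  }
}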
2024-11-24T04:50:11,440 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ to hdfs://localhost:38973/hbase/archive/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_ 2024-11-24T04:50:11,441 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ to hdfs://localhost:38973/hbase/archive/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/9bc0b99a5479462bb04211600f090081_SeqId_4_ 2024-11-24T04:50:11,443 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210 to hdfs://localhost:38973/hbase/archive/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ada700b88aef4f0a9f8178c5f0769210 2024-11-24T04:50:11,444 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ to hdfs://localhost:38973/hbase/archive/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/a/ccedafbd9bc34131b52ea449ef191902_SeqId_4_ 2024-11-24T04:50:11,444 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,444 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,445 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName b 2024-11-24T04:50:11,445 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,446 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] 
regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,446 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,447 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76749f1db87289a38b0c7d656ed5b68e columnFamilyName c 2024-11-24T04:50:11,447 DEBUG [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,447 INFO [StoreOpener-76749f1db87289a38b0c7d656ed5b68e-1 {}] regionserver.HStore(327): Store=76749f1db87289a38b0c7d656ed5b68e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,447 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,448 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,450 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,451 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 2024-11-24T04:50:11,453 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:11,454 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 76749f1db87289a38b0c7d656ed5b68e : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "76749f1db87289a38b0c7d656ed5b68e" family_name: "a" compaction_input: "ada700b88aef4f0a9f8178c5f0769210" compaction_input: "3efa2a6ff98b4472b8de07ba853b82c8_SeqId_4_" compaction_input: "ccedafbd9bc34131b52ea449ef191902_SeqId_4_" compaction_input: 
"9bc0b99a5479462bb04211600f090081_SeqId_4_" compaction_output: "6b3bf7676e9c467aaab5b8950f452acc" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-11-24T04:50:11,454 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-11-24T04:50:11,455 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 2024-11-24T04:50:11,455 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/0000000000000000008 2024-11-24T04:50:11,456 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,456 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,457 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:11,459 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 76749f1db87289a38b0c7d656ed5b68e 2024-11-24T04:50:11,461 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testCompactedBulkLoadedFiles/76749f1db87289a38b0c7d656ed5b68e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-24T04:50:11,462 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 76749f1db87289a38b0c7d656ed5b68e; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60759896, jitterRate=-0.09460699558258057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:11,462 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 76749f1db87289a38b0c7d656ed5b68e: Writing region info on filesystem at 1732423811415Initializing all the Stores at 1732423811415Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811415Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811416 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1732423811416Cleaning up temporary data from old regions at 1732423811456 (+40 ms)Region opened successfully at 1732423811462 (+6 ms) 2024-11-24T04:50:11,464 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 76749f1db87289a38b0c7d656ed5b68e, disabling compactions & flushes 2024-11-24T04:50:11,465 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:11,465 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:11,465 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. after waiting 0 ms 2024-11-24T04:50:11,465 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:11,465 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1732423806981.76749f1db87289a38b0c7d656ed5b68e. 2024-11-24T04:50:11,465 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 76749f1db87289a38b0c7d656ed5b68e: Waiting for close lock at 1732423811464Disabling compacts and flushes for region at 1732423811464Disabling writes for close at 1732423811465 (+1 ms)Writing region close event to WAL at 1732423811465Closed at 1732423811465 2024-11-24T04:50:11,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741924_1103 (size=95) 2024-11-24T04:50:11,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741924_1103 (size=95) 2024-11-24T04:50:11,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741924_1103 (size=95) 2024-11-24T04:50:11,470 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:50:11,470 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1732423811398) 2024-11-24T04:50:11,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=435 (was 421) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_264930692_22 at /127.0.0.1:58018 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:38973 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_264930692_22 at /127.0.0.1:58068 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_264930692_22 at /127.0.0.1:56444 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:38973 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=1324 (was 1240) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 527), ProcessCount=11 (was 11), AvailableMemoryMB=10812 (was 10822) 2024-11-24T04:50:11,482 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1324 is superior to 1024 2024-11-24T04:50:11,493 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=435, OpenFileDescriptor=1324, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=11, AvailableMemoryMB=10812 2024-11-24T04:50:11,493 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1324 is superior to 1024 2024-11-24T04:50:11,507 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:11,509 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-24T04:50:11,509 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-24T04:50:11,511 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-18740003, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/hregion-18740003, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:11,522 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-18740003/hregion-18740003.1732423811511, exclude list is [], retry=0 2024-11-24T04:50:11,525 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:11,525 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:11,526 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:11,527 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-18740003/hregion-18740003.1732423811511 2024-11-24T04:50:11,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871)] 2024-11-24T04:50:11,528 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => a4779f68db0aa47d61b8c14b2e1d389c, NAME => 'testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME 
=> 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38973/hbase 2024-11-24T04:50:11,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741926_1105 (size=67) 2024-11-24T04:50:11,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741926_1105 (size=67) 2024-11-24T04:50:11,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741926_1105 (size=67) 2024-11-24T04:50:11,537 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:11,539 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,540 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName a 2024-11-24T04:50:11,540 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,540 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,540 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,541 
INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName b 2024-11-24T04:50:11,541 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,542 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,542 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,543 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName c 2024-11-24T04:50:11,543 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,543 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,543 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,544 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,545 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,546 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,546 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,546 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:11,547 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T04:50:11,550 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a4779f68db0aa47d61b8c14b2e1d389c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59987253, jitterRate=-0.1061202734708786}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a4779f68db0aa47d61b8c14b2e1d389c: Writing region info on filesystem at 1732423811538Initializing all the Stores at 1732423811538Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811538Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811538Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811538Cleaning up temporary data from old regions at 1732423811546 (+8 ms)Region opened successfully at 1732423811550 (+4 ms) 2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing a4779f68db0aa47d61b8c14b2e1d389c, disabling compactions & flushes 2024-11-24T04:50:11,550 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 
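[Editor's note] The testReplayEditsWrittenViaHRegion region above is created directly against HRegion and a hand-built AsyncFSWAL, but the table descriptor the log dumps (families a, b and c; VERSIONS => '1'; BLOOMFILTER => 'ROW'; everything else at defaults) maps one-to-one onto the standard HBase 2.x client builders. A minimal client-side sketch of that same descriptor follows, assuming a reachable cluster configured via HBaseConfiguration; it is an equivalent declaration for illustration only, not part of the test itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: declare a table equivalent to the descriptor dumped in the log
// (families a, b, c; one version kept; ROW bloom filters; defaults otherwise).
public class CreateReplayTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
      for (String family : new String[] { "a", "b", "c" }) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .build());
      }
      admin.createTable(builder.build());
    }
  }
}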
2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. after waiting 0 ms 2024-11-24T04:50:11,550 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,551 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,551 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for a4779f68db0aa47d61b8c14b2e1d389c: Waiting for close lock at 1732423811550Disabling compacts and flushes for region at 1732423811550Disabling writes for close at 1732423811550Writing region close event to WAL at 1732423811551 (+1 ms)Closed at 1732423811551 2024-11-24T04:50:11,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741925_1104 (size=95) 2024-11-24T04:50:11,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741925_1104 (size=95) 2024-11-24T04:50:11,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741925_1104 (size=95) 2024-11-24T04:50:11,555 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:50:11,555 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-18740003:(num 1732423811511) 2024-11-24T04:50:11,555 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:11,556 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:11,568 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, exclude list is [], retry=0 2024-11-24T04:50:11,570 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:11,570 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:11,570 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:11,572 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 2024-11-24T04:50:11,572 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:36007:36007),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591)] 2024-11-24T04:50:11,572 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => a4779f68db0aa47d61b8c14b2e1d389c, NAME => 'testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:11,572 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:11,572 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,572 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,573 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,573 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T04:50:11,574 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName a 2024-11-24T04:50:11,574 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,575 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,575 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,575 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName b 2024-11-24T04:50:11,575 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,576 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,576 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,576 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName c 2024-11-24T04:50:11,577 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,577 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,577 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,577 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,578 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,579 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,579 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,580 DEBUG 
[Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:11,581 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,582 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a4779f68db0aa47d61b8c14b2e1d389c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62303598, jitterRate=-0.07160404324531555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:11,582 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a4779f68db0aa47d61b8c14b2e1d389c: Writing region info on filesystem at 1732423811572Initializing all the Stores at 1732423811573 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811573Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811573Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811573Cleaning up temporary data from old regions at 1732423811579 (+6 ms)Region opened successfully at 1732423811582 (+3 ms) 2024-11-24T04:50:11,589 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a4779f68db0aa47d61b8c14b2e1d389c 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-24T04:50:11,603 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/18e104196b59495d8f6359e9fce708f3 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1732423811582/Put/seqid=0 2024-11-24T04:50:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741928_1107 (size=5958) 2024-11-24T04:50:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741928_1107 (size=5958) 2024-11-24T04:50:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741928_1107 (size=5958) 2024-11-24T04:50:11,611 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), 
to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/18e104196b59495d8f6359e9fce708f3 2024-11-24T04:50:11,616 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/18e104196b59495d8f6359e9fce708f3 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/18e104196b59495d8f6359e9fce708f3 2024-11-24T04:50:11,622 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/18e104196b59495d8f6359e9fce708f3, entries=10, sequenceid=13, filesize=5.8 K 2024-11-24T04:50:11,623 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for a4779f68db0aa47d61b8c14b2e1d389c in 34ms, sequenceid=13, compaction requested=false 2024-11-24T04:50:11,623 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a4779f68db0aa47d61b8c14b2e1d389c: 2024-11-24T04:50:11,638 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing a4779f68db0aa47d61b8c14b2e1d389c, disabling compactions & flushes 2024-11-24T04:50:11,638 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,638 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,638 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. after waiting 0 ms 2024-11-24T04:50:11,638 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,639 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:11,639 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 
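[Editor's note] The flush above persisted ten small cells of family a (qualifiers x0..x9 on row testReplayEditsWrittenViaHRegion, 870 B of memstore data) as a ~5.8 K HFile at sequenceid=13, while the later edits were deliberately left unflushed and dropped on close (the "Memstore data size is 1740" ERROR), which is exactly what the subsequent WAL split and replay exercise. The test appears to drive the region directly rather than through a client; the sketch below is only the ordinary client-API equivalent of such edits, with the value strings invented for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the kind of edits the flush above accounts for: ten cells in family
// 'a' with qualifiers x0..x9 on one row (the HFile key dump shows
// "testReplayEditsWrittenViaHRegion/a:x0"). Values are placeholders.
public class WriteEditsSketch {
  public static void main(String[] args) throws IOException {
    byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion");
    byte[] family = Bytes.toBytes("a");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testReplayEditsWrittenViaHRegion"))) {
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row);
        put.addColumn(family, Bytes.toBytes("x" + i), Bytes.toBytes("value" + i));
        table.put(put); // each mutation is one WAL edit plus one memstore cell
      }
      // A server-side flush (for example via Admin#flush) would then persist these
      // cells as a small HFile, as the "Flushed memstore data size=870 B at
      // sequenceid=13" lines above record for the test region.
    }
  }
}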
2024-11-24T04:50:11,639 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for a4779f68db0aa47d61b8c14b2e1d389c: Waiting for close lock at 1732423811638Disabling compacts and flushes for region at 1732423811638Disabling writes for close at 1732423811638Writing region close event to WAL at 1732423811639 (+1 ms)Closed at 1732423811639 2024-11-24T04:50:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741927_1106 (size=3342) 2024-11-24T04:50:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741927_1106 (size=3342) 2024-11-24T04:50:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741927_1106 (size=3342) 2024-11-24T04:50:11,655 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, size=3.3 K (3342bytes) 2024-11-24T04:50:11,655 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 2024-11-24T04:50:11,655 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 after 0ms 2024-11-24T04:50:11,657 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:11,657 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 took 2ms 2024-11-24T04:50:11,659 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 so closing down 2024-11-24T04:50:11,659 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:50:11,660 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1732423811557.temp 2024-11-24T04:50:11,661 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp 2024-11-24T04:50:11,662 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:50:11,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741929_1108 (size=2944) 2024-11-24T04:50:11,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741929_1108 (size=2944) 2024-11-24T04:50:11,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741929_1108 
(size=2944) 2024-11-24T04:50:11,667 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-24T04:50:11,668 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 2024-11-24T04:50:11,668 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 10 ms; skipped=2; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, size=3.3 K, length=3342, corrupted=false, cancelled=false 2024-11-24T04:50:11,669 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, journal: Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, size=3.3 K (3342bytes) at 1732423811655Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 so closing down at 1732423811659 (+4 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp at 1732423811661 (+2 ms)3 split writer threads finished at 1732423811662 (+1 ms)Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1732423811667 (+5 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000003-wal.1732423811557.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 at 1732423811668 (+1 ms)Processed 32 edits across 1 Regions in 10 ms; skipped=2; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557, size=3.3 K, length=3342, corrupted=false, cancelled=false at 1732423811668 2024-11-24T04:50:11,670 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811557 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423811557 2024-11-24T04:50:11,671 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 2024-11-24T04:50:11,671 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:11,672 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:11,685 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, exclude list is [], retry=0 2024-11-24T04:50:11,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:11,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:11,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:11,689 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 2024-11-24T04:50:11,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:50:11,689 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => a4779f68db0aa47d61b8c14b2e1d389c, NAME => 'testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.', STARTKEY => '', ENDKEY => ''} 2024-11-24T04:50:11,689 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:11,689 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,689 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,691 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,692 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName a 2024-11-24T04:50:11,692 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,697 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/18e104196b59495d8f6359e9fce708f3 2024-11-24T04:50:11,697 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,697 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,698 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName b 2024-11-24T04:50:11,698 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,698 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,699 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,699 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName c 2024-11-24T04:50:11,699 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:11,700 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:11,700 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,700 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,702 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,702 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 2024-11-24T04:50:11,704 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:11,705 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 2024-11-24T04:50:11,705 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a4779f68db0aa47d61b8c14b2e1d389c 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-11-24T04:50:11,720 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/fd92cb29daa942bc981c094d8e6960f7 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1732423811623/Put/seqid=0 2024-11-24T04:50:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741931_1110 (size=5958) 2024-11-24T04:50:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741931_1110 (size=5958) 2024-11-24T04:50:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741931_1110 (size=5958) 2024-11-24T04:50:11,726 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/fd92cb29daa942bc981c094d8e6960f7 2024-11-24T04:50:11,745 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/9951b62874f64cbe91e6798ba7554407 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1732423811629/Put/seqid=0 2024-11-24T04:50:11,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741932_1111 (size=5958) 2024-11-24T04:50:11,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741932_1111 (size=5958) 2024-11-24T04:50:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741932_1111 (size=5958) 2024-11-24T04:50:11,752 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/9951b62874f64cbe91e6798ba7554407 2024-11-24T04:50:11,757 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/fd92cb29daa942bc981c094d8e6960f7 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/b/fd92cb29daa942bc981c094d8e6960f7 2024-11-24T04:50:11,762 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/b/fd92cb29daa942bc981c094d8e6960f7, entries=10, sequenceid=35, filesize=5.8 K 2024-11-24T04:50:11,763 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/9951b62874f64cbe91e6798ba7554407 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/c/9951b62874f64cbe91e6798ba7554407 2024-11-24T04:50:11,768 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/c/9951b62874f64cbe91e6798ba7554407, entries=10, sequenceid=35, filesize=5.8 K 2024-11-24T04:50:11,768 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for a4779f68db0aa47d61b8c14b2e1d389c in 63ms, sequenceid=35, compaction requested=false; wal=null 2024-11-24T04:50:11,769 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000035 2024-11-24T04:50:11,770 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,770 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,771 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:11,773 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:11,775 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-11-24T04:50:11,776 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a4779f68db0aa47d61b8c14b2e1d389c; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62836627, jitterRate=-0.06366129219532013}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:11,776 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a4779f68db0aa47d61b8c14b2e1d389c: Writing region info on filesystem at 1732423811689Initializing all the Stores at 1732423811690 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811690Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811691 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423811691Obtaining lock to block concurrent updates at 1732423811705 (+14 ms)Preparing flush snapshotting stores in a4779f68db0aa47d61b8c14b2e1d389c at 1732423811705Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1732423811705Flushing stores of testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 
at 1732423811705Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: creating writer at 1732423811705Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: appending metadata at 1732423811719 (+14 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: closing flushed file at 1732423811720 (+1 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: creating writer at 1732423811731 (+11 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: appending metadata at 1732423811744 (+13 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: closing flushed file at 1732423811744Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1114d3e2: reopening flushed file at 1732423811756 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@528bd023: reopening flushed file at 1732423811762 (+6 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for a4779f68db0aa47d61b8c14b2e1d389c in 63ms, sequenceid=35, compaction requested=false; wal=null at 1732423811768 (+6 ms)Cleaning up temporary data from old regions at 1732423811770 (+2 ms)Region opened successfully at 1732423811776 (+6 ms) 2024-11-24T04:50:11,843 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, size=0 (0bytes) 2024-11-24T04:50:11,843 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 might be still open, length is 0 2024-11-24T04:50:11,843 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 2024-11-24T04:50:11,844 WARN [IPC Server handler 0 on default port 38973 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-11-24T04:50:11,844 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 after 1ms 2024-11-24T04:50:14,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:56486 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56486 dst: /127.0.0.1:44795 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44795 remote=/127.0.0.1:56486]. Total timeout mills is 60000, 57727 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:14,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:58138 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:36429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58138 dst: /127.0.0.1:36429 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:14,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1724285291_22 at /127.0.0.1:58114 [Receiving block BP-1373737803-172.17.0.2-1732423771153:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:44605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58114 dst: /127.0.0.1:44605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:14,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741930_1112 (size=2936) 2024-11-24T04:50:14,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741930_1112 (size=2936) 2024-11-24T04:50:15,845 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 after 4002ms 2024-11-24T04:50:15,985 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:16,011 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 took 4168ms 2024-11-24T04:50:16,014 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672; continuing. 
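The split above starts against wal.1732423811672 while the file is still open (size=0), so the first lease recovery attempt fails and the second succeeds roughly four seconds later; most of the 4168ms reported for opening the file is that wait. Below is a minimal sketch of such a retry loop, assuming DistributedFileSystem.recoverLease as the underlying HDFS call; the pause and attempt limit are assumptions, and the real RecoverLeaseFSUtils applies its own timeouts and backoff.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch only: poll recoverLease until the NameNode reports the file as closed.
    public final class LeaseRecoverySketch {
        static boolean recoverLease(DistributedFileSystem dfs, Path wal, int maxAttempts)
                throws Exception {
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                // Returns true once lease recovery has completed and the file is closed;
                // until then the WAL length can still read as 0, as in the lines above.
                if (dfs.recoverLease(wal)) {
                    return true;
                }
                Thread.sleep(4000L); // assumed pause between attempts
            }
            return false;
        }
    }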
2024-11-24T04:50:16,014 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 so closing down 2024-11-24T04:50:16,014 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-24T04:50:16,015 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1732423811672.temp 2024-11-24T04:50:16,016 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp 2024-11-24T04:50:16,017 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-24T04:50:16,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741933_1113 (size=2944) 2024-11-24T04:50:16,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741933_1113 (size=2944) 2024-11-24T04:50:16,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741933_1113 (size=2944) 2024-11-24T04:50:16,026 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-24T04:50:16,027 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 2024-11-24T04:50:16,027 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, size=0, length=0, corrupted=false, cancelled=false 2024-11-24T04:50:16,028 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, journal: Splitting hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, size=0 (0bytes) at 1732423811843Finishing writing output for hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 so closing down at 1732423816014 (+4171 ms)Creating recovered edits writer path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp at 1732423816016 (+2 ms)3 split writer threads finished at 1732423816017 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1732423816026 (+9 ms)Rename recovered edits hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000037-wal.1732423811672.temp to hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 at 1732423816027 (+1 ms)Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672, size=0, length=0, corrupted=false, cancelled=false at 1732423816027 2024-11-24T04:50:16,029 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 to hdfs://localhost:38973/hbase/oldWALs/wal.1732423811672 2024-11-24T04:50:16,030 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 2024-11-24T04:50:16,030 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-24T04:50:16,032 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:38973/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506, archiveDir=hdfs://localhost:38973/hbase/oldWALs, maxLogs=32 2024-11-24T04:50:16,049 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423816033, exclude list is [], retry=0 2024-11-24T04:50:16,051 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36429,DS-ba3f6b62-eb9d-4f54-8dce-9853aad48e28,DISK] 2024-11-24T04:50:16,052 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44795,DS-92f8f17e-0a78-4e24-b7af-da7223272a99,DISK] 2024-11-24T04:50:16,052 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44605,DS-f39319b7-5869-4767-a451-c3ef9ef6aac6,DISK] 2024-11-24T04:50:16,055 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423816033 2024-11-24T04:50:16,055 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:37591:37591),(127.0.0.1/127.0.0.1:36007:36007)] 2024-11-24T04:50:16,055 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T04:50:16,057 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,058 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName a 2024-11-24T04:50:16,058 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:16,066 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/18e104196b59495d8f6359e9fce708f3 2024-11-24T04:50:16,067 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:16,067 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,068 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName b 2024-11-24T04:50:16,068 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:16,073 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/b/fd92cb29daa942bc981c094d8e6960f7 2024-11-24T04:50:16,073 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:16,074 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,075 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4779f68db0aa47d61b8c14b2e1d389c columnFamilyName c 2024-11-24T04:50:16,075 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T04:50:16,081 DEBUG [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/c/9951b62874f64cbe91e6798ba7554407 2024-11-24T04:50:16,081 INFO [StoreOpener-a4779f68db0aa47d61b8c14b2e1d389c-1 {}] regionserver.HStore(327): Store=a4779f68db0aa47d61b8c14b2e1d389c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T04:50:16,081 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,082 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,083 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,084 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 2024-11-24T04:50:16,085 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-11-24T04:50:16,089 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 2024-11-24T04:50:16,089 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a4779f68db0aa47d61b8c14b2e1d389c 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-24T04:50:16,111 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/67f242b4d2304da782fc04bc07c44fb2 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1732423811783/Put/seqid=0 2024-11-24T04:50:16,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741935_1115 (size=5958) 2024-11-24T04:50:16,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741935_1115 (size=5958) 2024-11-24T04:50:16,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741935_1115 (size=5958) 2024-11-24T04:50:16,434 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-11-24T04:50:16,434 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-11-24T04:50:16,435 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-11-24T04:50:16,435 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-11-24T04:50:16,520 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/67f242b4d2304da782fc04bc07c44fb2 2024-11-24T04:50:16,538 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/85e8f3d032694bf29835bddeeaa206bb is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1732423811789/Put/seqid=0 2024-11-24T04:50:16,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741936_1116 (size=5958) 2024-11-24T04:50:16,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741936_1116 (size=5958) 2024-11-24T04:50:16,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741936_1116 (size=5958) 2024-11-24T04:50:16,550 INFO [Time-limited test {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/85e8f3d032694bf29835bddeeaa206bb 2024-11-24T04:50:16,566 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/210f06b2732042f3a93966d94b15a389 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1732423811798/Put/seqid=0 2024-11-24T04:50:16,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741937_1117 (size=5958) 2024-11-24T04:50:16,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741937_1117 (size=5958) 2024-11-24T04:50:16,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741937_1117 (size=5958) 2024-11-24T04:50:16,577 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/210f06b2732042f3a93966d94b15a389 2024-11-24T04:50:16,582 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/a/67f242b4d2304da782fc04bc07c44fb2 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/67f242b4d2304da782fc04bc07c44fb2 2024-11-24T04:50:16,586 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/a/67f242b4d2304da782fc04bc07c44fb2, entries=10, sequenceid=66, filesize=5.8 K 2024-11-24T04:50:16,587 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/b/85e8f3d032694bf29835bddeeaa206bb as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/b/85e8f3d032694bf29835bddeeaa206bb 2024-11-24T04:50:16,591 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/b/85e8f3d032694bf29835bddeeaa206bb, entries=10, sequenceid=66, filesize=5.8 K 2024-11-24T04:50:16,592 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/.tmp/c/210f06b2732042f3a93966d94b15a389 as hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/c/210f06b2732042f3a93966d94b15a389 2024-11-24T04:50:16,597 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/c/210f06b2732042f3a93966d94b15a389, entries=10, sequenceid=66, filesize=5.8 K 2024-11-24T04:50:16,597 INFO [Time-limited test {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for a4779f68db0aa47d61b8c14b2e1d389c in 508ms, sequenceid=66, compaction requested=false; wal=null 2024-11-24T04:50:16,597 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/0000000000000000066 2024-11-24T04:50:16,598 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,599 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,599 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-24T04:50:16,601 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a4779f68db0aa47d61b8c14b2e1d389c 2024-11-24T04:50:16,603 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/hbase/data/default/testReplayEditsWrittenViaHRegion/a4779f68db0aa47d61b8c14b2e1d389c/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-11-24T04:50:16,603 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a4779f68db0aa47d61b8c14b2e1d389c; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59949179, jitterRate=-0.10668762028217316}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-24T04:50:16,604 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a4779f68db0aa47d61b8c14b2e1d389c: Writing region info on filesystem at 1732423816055Initializing all the Stores at 1732423816056 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423816056Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423816057 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732423816057Obtaining lock to block concurrent updates at 1732423816089 (+32 ms)Preparing flush snapshotting stores in a4779f68db0aa47d61b8c14b2e1d389c at 1732423816089Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1732423816090 (+1 ms)Flushing stores of 
testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. at 1732423816090Flushing a4779f68db0aa47d61b8c14b2e1d389c/a: creating writer at 1732423816090Flushing a4779f68db0aa47d61b8c14b2e1d389c/a: appending metadata at 1732423816110 (+20 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/a: closing flushed file at 1732423816110Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: creating writer at 1732423816524 (+414 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: appending metadata at 1732423816537 (+13 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/b: closing flushed file at 1732423816537Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: creating writer at 1732423816554 (+17 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: appending metadata at 1732423816566 (+12 ms)Flushing a4779f68db0aa47d61b8c14b2e1d389c/c: closing flushed file at 1732423816566Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37a66148: reopening flushed file at 1732423816581 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3083a867: reopening flushed file at 1732423816586 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c4eaca0: reopening flushed file at 1732423816592 (+6 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for a4779f68db0aa47d61b8c14b2e1d389c in 508ms, sequenceid=66, compaction requested=false; wal=null at 1732423816597 (+5 ms)Cleaning up temporary data from old regions at 1732423816599 (+2 ms)Region opened successfully at 1732423816604 (+5 ms) 2024-11-24T04:50:16,617 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing a4779f68db0aa47d61b8c14b2e1d389c, disabling compactions & flushes 2024-11-24T04:50:16,617 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:16,617 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:16,617 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. after waiting 0 ms 2024-11-24T04:50:16,617 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 2024-11-24T04:50:16,620 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1732423811508.a4779f68db0aa47d61b8c14b2e1d389c. 
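The flush sequence recorded above (DefaultStoreFlusher writing under .tmp/<family>, then HRegionFileSystem "Committing ... as ...") follows a write-to-temp-then-rename pattern: the new store file is built in a temporary location and only becomes visible to readers once it is moved into the column-family directory. The sketch below shows just that pattern on a local filesystem; it is not HBase's HRegionFileSystem code, and the directory layout, class name, and method names are invented for illustration (HBase performs the equivalent steps against HDFS).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.UUID;

public class TmpThenCommit {
    // Write the flushed data under <region>/.tmp/<family>/<random-name>.
    static Path flushToTmp(Path regionDir, String family, byte[] data) throws IOException {
        Path tmpDir = regionDir.resolve(".tmp").resolve(family);
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(UUID.randomUUID().toString().replace("-", ""));
        Files.write(tmpFile, data); // readers never look under .tmp
        return tmpFile;
    }

    // Commit by renaming the finished file into <region>/<family>/.
    static Path commit(Path regionDir, String family, Path tmpFile) throws IOException {
        Path storeDir = regionDir.resolve(family);
        Files.createDirectories(storeDir);
        Path dst = storeDir.resolve(tmpFile.getFileName());
        // The rename makes the completed file visible in a single step.
        return Files.move(tmpFile, dst, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region");
        Path tmp = flushToTmp(regionDir, "a", "flushed cells".getBytes());
        System.out.println("Committed as " + commit(regionDir, "a", tmp));
    }
}

The point of the two-phase layout is that a crash mid-flush leaves at worst an orphan under .tmp, never a half-written file in the store directory.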
2024-11-24T04:50:16,620 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for a4779f68db0aa47d61b8c14b2e1d389c: Waiting for close lock at 1732423816617Disabling compacts and flushes for region at 1732423816617Disabling writes for close at 1732423816617Writing region close event to WAL at 1732423816620 (+3 ms)Closed at 1732423816620 2024-11-24T04:50:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741934_1114 (size=95) 2024-11-24T04:50:16,624 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423816033 not finished, retry = 0 2024-11-24T04:50:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741934_1114 (size=95) 2024-11-24T04:50:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741934_1114 (size=95) 2024-11-24T04:50:16,727 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-24T04:50:16,727 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1732423816033) 2024-11-24T04:50:16,740 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=440 (was 435) Potentially hanging thread: IPC Client (139959168) connection to localhost/127.0.0.1:38973 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-579956906_22 at /127.0.0.1:35216 [Waiting for operation #25] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-579956906_22 at /127.0.0.1:56528 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-579956906_22 at /127.0.0.1:34446 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:38973 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1390 (was 1324) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=484 (was 527), ProcessCount=11 (was 11), AvailableMemoryMB=10764 (was 10812) 2024-11-24T04:50:16,740 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1390 is superior to 1024 2024-11-24T04:50:16,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T04:50:16,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
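The ResourceChecker summary above ("Thread=440 (was 435) - Thread LEAK? -, OpenFileDescriptor=1390 (was 1324)") comes from comparing resource counts taken before and after the test. Below is a minimal sketch of the thread side of that accounting only; it assumes nothing about the real ResourceChecker beyond the before/after idea, and the class name, thread name, and output format are invented.

import java.util.HashSet;
import java.util.Set;

public class ThreadLeakCheck {
    // Snapshot the names of all currently live threads.
    static Set<String> liveThreadNames() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            names.add(t.getName());
        }
        return names;
    }

    public static void main(String[] args) {
        Set<String> before = liveThreadNames();

        // Stand-in for a test body that leaves a thread behind.
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "potentially-hanging-thread");
        leaked.setDaemon(true);
        leaked.start();

        Set<String> after = liveThreadNames();
        System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
        after.removeAll(before);
        if (!after.isEmpty()) {
            System.out.println("Potentially hanging thread(s): " + after);
        }
    }
}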
2024-11-24T04:50:16,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T04:50:16,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:16,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:16,741 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
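The "Call stack:" block above is produced by capturing the current thread's stack trace inside close(), so the log records which code path shut the shared connection down. Here is a small, self-contained sketch of that technique; the class and message text are invented, and only the Thread.getStackTrace() trick itself is taken from the log.

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

public class TracedCloseable implements AutoCloseable {
    private final AtomicBoolean closed = new AtomicBoolean(false);

    @Override
    public void close() {
        if (!closed.compareAndSet(false, true)) {
            return; // already closed, nothing to log
        }
        String stack = Arrays.stream(Thread.currentThread().getStackTrace())
                .skip(2) // drop the getStackTrace() and close() frames
                .map(StackTraceElement::toString)
                .collect(Collectors.joining("\n  at "));
        System.out.println("Connection has been closed. Call stack:\n  at " + stack);
    }

    public static void main(String[] args) {
        try (TracedCloseable conn = new TracedCloseable()) {
            // work with conn ...
        }
    }
}

Logging the caller's stack at close time is what makes it possible to tell, from the output alone, whether the teardown came from tearDownAfterClass, the master's shutdown hook, or somewhere unexpected.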
2024-11-24T04:50:16,741 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T04:50:16,741 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=762354849, stopped=false 2024-11-24T04:50:16,742 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4464c5b832df,34701,1732423776004 2024-11-24T04:50:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T04:50:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T04:50:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T04:50:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:50:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:50:16,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:50:16,803 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T04:50:16,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:50:16,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:50:16,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T04:50:16,806 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
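The ZooKeeper traffic above shows the shutdown signal being propagated as a watch event: deleting /hbase/running fires NodeDeleted on the master and both region servers. The sketch below shows that pattern with the plain ZooKeeper client. It assumes a reachable ensemble and the ZooKeeper client jar on the classpath; the connect string is illustrative, and the watch set by exists(path, true) is one-shot, so a long-lived watcher would re-register it after each event.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatcher {
    public static void main(String[] args) throws Exception {
        CountDownLatch stopped = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                System.out.println("Received NodeDeleted for /hbase/running, shutting down");
                stopped.countDown();
            }
        });
        // exists(path, true) sets a watch whether or not the znode exists yet.
        zk.exists("/hbase/running", true);
        stopped.await();
        zk.close();
    }
}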
2024-11-24T04:50:16,806 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T04:50:16,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:16,807 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4464c5b832df,39197,1732423777462' ***** 2024-11-24T04:50:16,807 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T04:50:16,807 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4464c5b832df,46039,1732423777593' ***** 2024-11-24T04:50:16,807 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T04:50:16,808 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T04:50:16,808 
INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(3091): Received CLOSE for 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(959): stopping server 4464c5b832df,46039,1732423777593 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(959): stopping server 4464c5b832df,39197,1732423777462 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T04:50:16,808 INFO [RS:1;4464c5b832df:46039 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4464c5b832df:46039. 2024-11-24T04:50:16,808 INFO [RS:0;4464c5b832df:39197 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4464c5b832df:39197. 2024-11-24T04:50:16,808 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7fab4f06c042c658eda5a15104ff7acf, disabling compactions & flushes 2024-11-24T04:50:16,809 DEBUG [RS:0;4464c5b832df:39197 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T04:50:16,809 DEBUG [RS:1;4464c5b832df:46039 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at 
org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T04:50:16,809 DEBUG [RS:0;4464c5b832df:39197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:16,809 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:50:16,809 DEBUG [RS:1;4464c5b832df:46039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:50:16,809 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T04:50:16,809 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1325): Online Regions={7fab4f06c042c658eda5a15104ff7acf=testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf.} 2024-11-24T04:50:16,809 INFO [RS:1;4464c5b832df:46039 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. after waiting 0 ms 2024-11-24T04:50:16,809 INFO [RS:1;4464c5b832df:46039 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T04:50:16,809 DEBUG [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1351): Waiting on 7fab4f06c042c658eda5a15104ff7acf 2024-11-24T04:50:16,809 INFO [RS:1;4464c5b832df:46039 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
2024-11-24T04:50:16,809 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T04:50:16,809 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T04:50:16,809 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T04:50:16,809 DEBUG [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T04:50:16,809 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T04:50:16,809 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T04:50:16,810 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T04:50:16,810 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-11-24T04:50:16,813 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/7fab4f06c042c658eda5a15104ff7acf/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-11-24T04:50:16,814 INFO [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 2024-11-24T04:50:16,814 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7fab4f06c042c658eda5a15104ff7acf: Waiting for close lock at 1732423816808Running coprocessor pre-close hooks at 1732423816808Disabling compacts and flushes for region at 1732423816808Disabling writes for close at 1732423816809 (+1 ms)Writing region close event to WAL at 1732423816810 (+1 ms)Running coprocessor post-close hooks at 1732423816814 (+4 ms)Closed at 1732423816814 2024-11-24T04:50:16,814 DEBUG [RS_CLOSE_REGION-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf. 
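The "Region open journal" and "Region close journal" entries above are a list of named steps with absolute timestamps and "+N ms" deltas between consecutive steps. The following is a tiny sketch of that bookkeeping with an invented class and invented step names; HBase keeps the real journal in its own monitoring code.

import java.util.ArrayList;
import java.util.List;

public class StepJournal {
    private final List<String> entries = new ArrayList<>();
    private long lastMillis = -1;

    // Record a named step with its timestamp and the delta from the previous step.
    void record(String step) {
        long now = System.currentTimeMillis();
        String delta = lastMillis < 0 ? "" : " (+" + (now - lastMillis) + " ms)";
        entries.add(step + " at " + now + delta);
        lastMillis = now;
    }

    @Override
    public String toString() {
        return String.join("", entries);
    }

    public static void main(String[] args) throws InterruptedException {
        StepJournal journal = new StepJournal();
        journal.record("Waiting for close lock");
        journal.record("Disabling compacts and flushes for region");
        Thread.sleep(3);
        journal.record("Writing region close event to WAL");
        journal.record("Closed");
        System.out.println("Region close journal: " + journal);
    }
}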
2024-11-24T04:50:16,828 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/info/5f9ec38fafd54c7c9096a0226428c5e9 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1732423792960.7fab4f06c042c658eda5a15104ff7acf./info:regioninfo/1732423796088/Put/seqid=0 2024-11-24T04:50:16,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741938_1118 (size=8243) 2024-11-24T04:50:16,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741938_1118 (size=8243) 2024-11-24T04:50:16,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741938_1118 (size=8243) 2024-11-24T04:50:16,834 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/info/5f9ec38fafd54c7c9096a0226428c5e9 2024-11-24T04:50:16,849 INFO [regionserver/4464c5b832df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:50:16,849 INFO [regionserver/4464c5b832df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:50:16,852 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/ns/760a9afeb5c54cf5a58d68f0f4ece7a9 is 43, key is default/ns:d/1732423780126/Put/seqid=0 2024-11-24T04:50:16,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741939_1119 (size=5153) 2024-11-24T04:50:16,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741939_1119 (size=5153) 2024-11-24T04:50:16,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741939_1119 (size=5153) 2024-11-24T04:50:16,858 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/ns/760a9afeb5c54cf5a58d68f0f4ece7a9 2024-11-24T04:50:16,877 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/table/5390eac8ec1a4608a68d15cfe4901585 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1732423793394/Put/seqid=0 2024-11-24T04:50:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741940_1120 (size=5431) 2024-11-24T04:50:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36429 is added to blk_1073741940_1120 (size=5431) 2024-11-24T04:50:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741940_1120 (size=5431) 2024-11-24T04:50:16,883 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/table/5390eac8ec1a4608a68d15cfe4901585 2024-11-24T04:50:16,889 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/info/5f9ec38fafd54c7c9096a0226428c5e9 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/info/5f9ec38fafd54c7c9096a0226428c5e9 2024-11-24T04:50:16,894 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/info/5f9ec38fafd54c7c9096a0226428c5e9, entries=18, sequenceid=21, filesize=8.0 K 2024-11-24T04:50:16,895 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/ns/760a9afeb5c54cf5a58d68f0f4ece7a9 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/ns/760a9afeb5c54cf5a58d68f0f4ece7a9 2024-11-24T04:50:16,900 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/ns/760a9afeb5c54cf5a58d68f0f4ece7a9, entries=2, sequenceid=21, filesize=5.0 K 2024-11-24T04:50:16,901 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/.tmp/table/5390eac8ec1a4608a68d15cfe4901585 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/table/5390eac8ec1a4608a68d15cfe4901585 2024-11-24T04:50:16,905 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/table/5390eac8ec1a4608a68d15cfe4901585, entries=2, sequenceid=21, filesize=5.3 K 2024-11-24T04:50:16,906 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=21, compaction requested=false 2024-11-24T04:50:16,910 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T04:50:16,911 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T04:50:16,911 INFO [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T04:50:16,911 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732423816809Running coprocessor pre-close hooks at 1732423816809Disabling compacts and flushes for region at 1732423816809Disabling writes for close at 1732423816810 (+1 ms)Obtaining lock to block concurrent updates at 1732423816810Preparing flush snapshotting stores in 1588230740 at 1732423816810Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1732423816810Flushing stores of hbase:meta,,1.1588230740 at 1732423816811 (+1 ms)Flushing 1588230740/info: creating writer at 1732423816811Flushing 1588230740/info: appending metadata at 1732423816827 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732423816827Flushing 1588230740/ns: creating writer at 1732423816839 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732423816851 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1732423816851Flushing 1588230740/table: creating writer at 1732423816863 (+12 ms)Flushing 1588230740/table: appending metadata at 1732423816876 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732423816876Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c425832: reopening flushed file at 1732423816888 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61887d84: reopening flushed file at 1732423816895 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f9e30da: reopening flushed file at 1732423816900 (+5 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=21, compaction requested=false at 1732423816906 (+6 ms)Writing region close event to WAL at 1732423816907 (+1 ms)Running coprocessor post-close hooks at 1732423816910 (+3 ms)Closed at 1732423816911 (+1 ms) 2024-11-24T04:50:16,911 DEBUG [RS_CLOSE_META-regionserver/4464c5b832df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T04:50:17,009 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(976): stopping server 4464c5b832df,39197,1732423777462; all regions closed. 2024-11-24T04:50:17,010 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(976): stopping server 4464c5b832df,46039,1732423777593; all regions closed. 
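The "Wrote file=.../recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1" lines record a marker file whose name encodes the highest sequence id persisted before close, so a later open can tell which edits are already covered. Below is a rough local-filesystem sketch of that naming idea only; the directory layout and class are invented, and the real files live in HDFS and are written by WALSplitUtil.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class SeqIdMarker {
    // Drop an empty "<maxSeqId>.seqid" marker in the recovered.edits directory.
    static Path writeMaxSeqId(Path recoveredEditsDir, long maxSeqId) throws IOException {
        Files.createDirectories(recoveredEditsDir);
        return Files.createFile(recoveredEditsDir.resolve(maxSeqId + ".seqid"));
    }

    // Read back the largest sequence id encoded in any "<N>.seqid" file name.
    static long readMaxSeqId(Path recoveredEditsDir) throws IOException {
        try (Stream<Path> files = Files.list(recoveredEditsDir)) {
            return files.map(p -> p.getFileName().toString())
                    .filter(name -> name.endsWith(".seqid"))
                    .map(name -> Long.parseLong(name.substring(0, name.length() - ".seqid".length())))
                    .max(Comparator.naturalOrder())
                    .orElse(-1L);
        }
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("region").resolve("recovered.edits");
        writeMaxSeqId(dir, 24L);
        System.out.println("newMaxSeqId=" + readMaxSeqId(dir));
    }
}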
2024-11-24T04:50:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741834_1010 (size=2180) 2024-11-24T04:50:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741834_1010 (size=2180) 2024-11-24T04:50:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741836_1012 (size=4715) 2024-11-24T04:50:17,016 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/WALs/4464c5b832df,46039,1732423777593/4464c5b832df%2C46039%2C1732423777593.meta.1732423779948.meta not finished, retry = 0 2024-11-24T04:50:17,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741836_1012 (size=4715) 2024-11-24T04:50:17,019 DEBUG [RS:0;4464c5b832df:39197 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs 2024-11-24T04:50:17,019 INFO [RS:0;4464c5b832df:39197 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4464c5b832df%2C39197%2C1732423777462:(num 1732423779373) 2024-11-24T04:50:17,020 DEBUG [RS:0;4464c5b832df:39197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] hbase.ChoreService(370): Chore service for: regionserver/4464c5b832df:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T04:50:17,020 INFO [regionserver/4464c5b832df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T04:50:17,020 INFO [RS:0;4464c5b832df:39197 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T04:50:17,021 INFO [RS:0;4464c5b832df:39197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39197 2024-11-24T04:50:17,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4464c5b832df,39197,1732423777462 2024-11-24T04:50:17,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T04:50:17,033 INFO [RS:0;4464c5b832df:39197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T04:50:17,044 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4464c5b832df,39197,1732423777462] 2024-11-24T04:50:17,054 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4464c5b832df,39197,1732423777462 already deleted, retry=false 2024-11-24T04:50:17,055 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4464c5b832df,39197,1732423777462 expired; onlineServers=1 2024-11-24T04:50:17,121 DEBUG [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs 2024-11-24T04:50:17,121 INFO [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4464c5b832df%2C46039%2C1732423777593.meta:.meta(num 1732423779948) 2024-11-24T04:50:17,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741833_1009 (size=95) 2024-11-24T04:50:17,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741833_1009 (size=95) 2024-11-24T04:50:17,130 DEBUG [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/oldWALs 2024-11-24T04:50:17,130 INFO [RS:1;4464c5b832df:46039 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4464c5b832df%2C46039%2C1732423777593:(num 1732423779373) 2024-11-24T04:50:17,130 DEBUG [RS:1;4464c5b832df:46039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T04:50:17,130 INFO [RS:1;4464c5b832df:46039 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T04:50:17,131 INFO [RS:1;4464c5b832df:46039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T04:50:17,131 INFO [RS:1;4464c5b832df:46039 {}] hbase.ChoreService(370): Chore service for: regionserver/4464c5b832df:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T04:50:17,131 INFO [RS:1;4464c5b832df:46039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T04:50:17,131 INFO [regionserver/4464c5b832df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
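The chore-service shutdown above lists the periodic tasks ("ScheduledChore name=..., period=60000, unit=MILLISECONDS") that were still registered when the region server stopped. Here is a sketch of that schedule-then-cancel lifecycle on a plain ScheduledExecutorService; the chore names and periods echo the log for flavour, but the class is not HBase's ChoreService.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class ChoreServiceSketch {
    private final ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
    private final Map<String, ScheduledFuture<?>> chores = new ConcurrentHashMap<>();

    // Register a named periodic task.
    void schedule(String name, Runnable task, long periodMillis) {
        chores.put(name,
                pool.scheduleAtFixedRate(task, periodMillis, periodMillis, TimeUnit.MILLISECONDS));
    }

    // Report what was still scheduled, then cancel everything.
    void shutdown() {
        System.out.println("Chore service had " + chores.keySet() + " on shutdown");
        chores.values().forEach(f -> f.cancel(true));
        pool.shutdownNow();
    }

    public static void main(String[] args) {
        ChoreServiceSketch service = new ChoreServiceSketch();
        service.schedule("CompactionThroughputTuner", () -> {}, 60_000);
        service.schedule("ReplicationSourceStatistics", () -> {}, 300_000);
        service.shutdown();
    }
}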
2024-11-24T04:50:17,131 INFO [RS:1;4464c5b832df:46039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46039 2024-11-24T04:50:17,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T04:50:17,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4464c5b832df,46039,1732423777593 2024-11-24T04:50:17,142 INFO [RS:1;4464c5b832df:46039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T04:50:17,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,144 INFO [RS:0;4464c5b832df:39197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T04:50:17,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39197-0x1016b2cef8a0001, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,144 INFO [RS:0;4464c5b832df:39197 {}] regionserver.HRegionServer(1031): Exiting; stopping=4464c5b832df,39197,1732423777462; zookeeper connection closed. 2024-11-24T04:50:17,145 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4125085a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4125085a 2024-11-24T04:50:17,152 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4464c5b832df,46039,1732423777593] 2024-11-24T04:50:17,163 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4464c5b832df,46039,1732423777593 already deleted, retry=false 2024-11-24T04:50:17,163 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4464c5b832df,46039,1732423777593 expired; onlineServers=0 2024-11-24T04:50:17,163 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4464c5b832df,34701,1732423776004' ***** 2024-11-24T04:50:17,163 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T04:50:17,163 INFO [M:0;4464c5b832df:34701 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T04:50:17,163 INFO [M:0;4464c5b832df:34701 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T04:50:17,163 DEBUG [M:0;4464c5b832df:34701 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T04:50:17,163 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
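The LogCleaner warning above ("Interrupted while cleaning old WALs ... Exiting.") shows a worker that treats interruption as its shutdown signal rather than as an error. A small sketch of that interrupt-aware loop follows; the queue, file name, and thread name are invented for illustration.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class OldWalsCleaner {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> toDelete = new LinkedBlockingQueue<>();
        Thread cleaner = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    String wal = toDelete.take(); // blocks until work arrives or we are interrupted
                    System.out.println("Deleting old WAL " + wal);
                } catch (InterruptedException e) {
                    System.out.println("Interrupted while cleaning old WALs, exiting.");
                    Thread.currentThread().interrupt(); // restore the flag and fall out of the loop
                }
            }
        }, "OldWALsCleaner-0");
        cleaner.start();
        toDelete.put("wal.1732423816033");
        Thread.sleep(100);
        cleaner.interrupt(); // shutdown path
        cleaner.join();
    }
}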
2024-11-24T04:50:17,163 DEBUG [M:0;4464c5b832df:34701 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T04:50:17,163 DEBUG [master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.small.0-1732423778949 {}] cleaner.HFileCleaner(306): Exit Thread[master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.small.0-1732423778949,5,FailOnTimeoutGroup] 2024-11-24T04:50:17,163 DEBUG [master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.large.0-1732423778948 {}] cleaner.HFileCleaner(306): Exit Thread[master/4464c5b832df:0:becomeActiveMaster-HFileCleaner.large.0-1732423778948,5,FailOnTimeoutGroup] 2024-11-24T04:50:17,164 INFO [M:0;4464c5b832df:34701 {}] hbase.ChoreService(370): Chore service for: master/4464c5b832df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T04:50:17,164 INFO [M:0;4464c5b832df:34701 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T04:50:17,164 DEBUG [M:0;4464c5b832df:34701 {}] master.HMaster(1795): Stopping service threads 2024-11-24T04:50:17,164 INFO [M:0;4464c5b832df:34701 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T04:50:17,164 INFO [M:0;4464c5b832df:34701 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T04:50:17,164 INFO [M:0;4464c5b832df:34701 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T04:50:17,164 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T04:50:17,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T04:50:17,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T04:50:17,174 DEBUG [M:0;4464c5b832df:34701 {}] zookeeper.ZKUtil(347): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T04:50:17,174 WARN [M:0;4464c5b832df:34701 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T04:50:17,174 INFO [M:0;4464c5b832df:34701 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/.lastflushedseqids 2024-11-24T04:50:17,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741941_1121 (size=138) 2024-11-24T04:50:17,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741941_1121 (size=138) 2024-11-24T04:50:17,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741941_1121 (size=138) 2024-11-24T04:50:17,187 INFO [M:0;4464c5b832df:34701 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T04:50:17,187 INFO [M:0;4464c5b832df:34701 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T04:50:17,187 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T04:50:17,187 INFO [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:50:17,187 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:50:17,187 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T04:50:17,187 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:50:17,187 INFO [M:0;4464c5b832df:34701 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.33 KB heapSize=83.72 KB 2024-11-24T04:50:17,201 DEBUG [M:0;4464c5b832df:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6182fb31ab1e4bffa918299bde629ae5 is 82, key is hbase:meta,,1/info:regioninfo/1732423780038/Put/seqid=0 2024-11-24T04:50:17,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741942_1122 (size=5672) 2024-11-24T04:50:17,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741942_1122 (size=5672) 2024-11-24T04:50:17,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741942_1122 (size=5672) 2024-11-24T04:50:17,207 INFO [M:0;4464c5b832df:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6182fb31ab1e4bffa918299bde629ae5 2024-11-24T04:50:17,225 DEBUG [M:0;4464c5b832df:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb176df5160342efa1c5a582dc7f83b2 is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732423793402/Put/seqid=0 2024-11-24T04:50:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741943_1123 (size=7754) 2024-11-24T04:50:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741943_1123 (size=7754) 2024-11-24T04:50:17,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741943_1123 (size=7754) 2024-11-24T04:50:17,231 INFO [M:0;4464c5b832df:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.60 KB at sequenceid=168 (bloomFilter=true), 
to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb176df5160342efa1c5a582dc7f83b2 2024-11-24T04:50:17,236 INFO [M:0;4464c5b832df:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb176df5160342efa1c5a582dc7f83b2 2024-11-24T04:50:17,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,253 INFO [RS:1;4464c5b832df:46039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T04:50:17,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46039-0x1016b2cef8a0002, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,253 INFO [RS:1;4464c5b832df:46039 {}] regionserver.HRegionServer(1031): Exiting; stopping=4464c5b832df,46039,1732423777593; zookeeper connection closed. 2024-11-24T04:50:17,253 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3b7c75ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3b7c75ae 2024-11-24T04:50:17,253 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-24T04:50:17,256 DEBUG [M:0;4464c5b832df:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca8694ec23d9478092e34f0f9bfa3377 is 69, key is 4464c5b832df,39197,1732423777462/rs:state/1732423779051/Put/seqid=0 2024-11-24T04:50:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741944_1124 (size=5440) 2024-11-24T04:50:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741944_1124 (size=5440) 2024-11-24T04:50:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741944_1124 (size=5440) 2024-11-24T04:50:17,265 INFO [M:0;4464c5b832df:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca8694ec23d9478092e34f0f9bfa3377 2024-11-24T04:50:17,270 INFO [M:0;4464c5b832df:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca8694ec23d9478092e34f0f9bfa3377 2024-11-24T04:50:17,272 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6182fb31ab1e4bffa918299bde629ae5 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6182fb31ab1e4bffa918299bde629ae5 2024-11-24T04:50:17,277 INFO [M:0;4464c5b832df:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6182fb31ab1e4bffa918299bde629ae5, entries=8, sequenceid=168, filesize=5.5 K 2024-11-24T04:50:17,278 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb176df5160342efa1c5a582dc7f83b2 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb176df5160342efa1c5a582dc7f83b2 2024-11-24T04:50:17,285 INFO [M:0;4464c5b832df:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb176df5160342efa1c5a582dc7f83b2 2024-11-24T04:50:17,285 INFO [M:0;4464c5b832df:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb176df5160342efa1c5a582dc7f83b2, entries=17, sequenceid=168, filesize=7.6 K 2024-11-24T04:50:17,287 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca8694ec23d9478092e34f0f9bfa3377 as hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca8694ec23d9478092e34f0f9bfa3377 2024-11-24T04:50:17,296 INFO [M:0;4464c5b832df:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca8694ec23d9478092e34f0f9bfa3377 2024-11-24T04:50:17,296 INFO [M:0;4464c5b832df:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38973/user/jenkins/test-data/60c886e5-4a56-341a-3188-c60331fa7e6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca8694ec23d9478092e34f0f9bfa3377, entries=3, sequenceid=168, filesize=5.3 K 2024-11-24T04:50:17,298 INFO [M:0;4464c5b832df:34701 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=168, compaction requested=false 2024-11-24T04:50:17,299 INFO [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T04:50:17,299 DEBUG [M:0;4464c5b832df:34701 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732423817187Disabling compacts and flushes for region at 1732423817187Disabling writes for close at 1732423817187Obtaining lock to block concurrent updates at 1732423817187Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732423817187Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69972, getHeapSize=85664, getOffHeapSize=0, getCellsCount=195 at 1732423817187Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732423817188 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732423817188Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732423817201 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732423817201Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732423817211 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732423817224 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732423817224Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732423817236 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732423817255 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732423817255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a53b4f9: reopening flushed file at 1732423817271 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@613e2b68: reopening flushed file at 1732423817277 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e6b1b0f: reopening flushed file at 1732423817285 (+8 ms)Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=168, compaction requested=false at 1732423817298 (+13 ms)Writing region close event to WAL at 1732423817299 (+1 ms)Closed at 1732423817299 2024-11-24T04:50:17,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44605 is added to blk_1073741830_1006 (size=56530) 2024-11-24T04:50:17,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741830_1006 (size=56530) 2024-11-24T04:50:17,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36429 is added to blk_1073741830_1006 (size=56530) 2024-11-24T04:50:17,304 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T04:50:17,304 INFO [M:0;4464c5b832df:34701 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T04:50:17,304 INFO [M:0;4464c5b832df:34701 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34701 2024-11-24T04:50:17,305 INFO [M:0;4464c5b832df:34701 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T04:50:17,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,418 INFO [M:0;4464c5b832df:34701 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T04:50:17,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x1016b2cef8a0000, quorum=127.0.0.1:55024, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T04:50:17,423 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796670 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796670 (inode 16655) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1732423796400/wal.1732423796670 (inode 16655) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-11-24T04:50:17,424 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1732423788459/wal.1732423788663 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:17,424 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1732423811506/wal.1732423811672 with renewLeaseKey: DEFAULT_16767 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:17,426 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423806090 with renewLeaseKey: DEFAULT_16678 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423806090 (inode 16678) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1732423796831/wal.1732423806090 (inode 16678) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-11-24T04:50:17,427 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1732423781345/wal.1732423781415 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:17,429 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806831 with renewLeaseKey: DEFAULT_16704 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806831 (inode 16704) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1732423806328/wal.1732423806831 (inode 16704) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-11-24T04:50:17,430 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1732423806979/wal.1732423807156 with renewLeaseKey: DEFAULT_16726 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T04:50:17,432 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal.1732423781216 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal.1732423781216 (inode 16485) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files. 
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1732423781030/wal.1732423781216 (inode 16485) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-11-24T04:50:17,434 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal.1732423780767 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal.1732423780767 (inode 16462) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1732423780452/wal.1732423780767 (inode 16462) Holder DFSClient_NONMAPREDUCE_1724285291_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-11-24T04:50:17,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ac22c80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T04:50:17,440 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@487c0a0f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T04:50:17,440 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T04:50:17,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6334c715{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T04:50:17,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28786e11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,STOPPED}
2024-11-24T04:50:17,443 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T04:50:17,443 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T04:50:17,443 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1373737803-172.17.0.2-1732423771153 (Datanode Uuid 6240c108-d1b0-413f-8591-50ccf623f7da) service to localhost/127.0.0.1:38973
2024-11-24T04:50:17,443 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T04:50:17,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data5/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data6/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,445 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T04:50:17,447 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42b50423{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T04:50:17,447 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cb1f0d1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T04:50:17,447 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T04:50:17,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40ee7008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T04:50:17,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b863aae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,STOPPED}
2024-11-24T04:50:17,451 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T04:50:17,451 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T04:50:17,451 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T04:50:17,451 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1373737803-172.17.0.2-1732423771153 (Datanode Uuid 0a688774-b62b-4628-a4b3-91e6af29ec07) service to localhost/127.0.0.1:38973
2024-11-24T04:50:17,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data3/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data4/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,453 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T04:50:17,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ddf0278{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T04:50:17,459 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62465dbd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T04:50:17,459 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T04:50:17,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e19ad24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T04:50:17,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1647e812{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,STOPPED}
2024-11-24T04:50:17,461 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T04:50:17,461 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T04:50:17,461 WARN [BP-1373737803-172.17.0.2-1732423771153 heartbeating to localhost/127.0.0.1:38973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1373737803-172.17.0.2-1732423771153 (Datanode Uuid 4a6ddd70-8fc5-4259-a718-7371409e11f6) service to localhost/127.0.0.1:38973
2024-11-24T04:50:17,461 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T04:50:17,461 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data1/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/cluster_2b4e4e59-401c-4e9a-1f98-9a1c4a75cde2/data/data2/current/BP-1373737803-172.17.0.2-1732423771153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T04:50:17,462 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T04:50:17,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5602fba9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T04:50:17,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@783558f7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T04:50:17,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T04:50:17,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4763bc82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T04:50:17,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e216de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6bd6fbdc-2b86-1a1f-0da4-b1aeb3f1a9fa/hadoop.log.dir/,STOPPED}
2024-11-24T04:50:17,481 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-24T04:50:17,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down