2024-12-07 01:24:06,308 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-07 01:24:06,320 main DEBUG Took 0.010115 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-07 01:24:06,321 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-07 01:24:06,322 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-07 01:24:06,323 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-07 01:24:06,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,336 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-07 01:24:06,354 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,357 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,358 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,360 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,360 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,361 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,362 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,363 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,363 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,364 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,364 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,365 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,365 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,366 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,366 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,367 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,367 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,368 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,368 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,369 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,369 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-07 01:24:06,370 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,370 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-07 01:24:06,372 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-07 01:24:06,374 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-07 01:24:06,376 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-07 01:24:06,377 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-07 01:24:06,379 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-07 01:24:06,379 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-07 01:24:06,388 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-07 01:24:06,391 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-07 01:24:06,393 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-07 01:24:06,393 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-07 01:24:06,394 main DEBUG createAppenders(={Console})
2024-12-07 01:24:06,394 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized
2024-12-07 01:24:06,395 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-07 01:24:06,395 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK.
2024-12-07 01:24:06,396 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-07 01:24:06,396 main DEBUG OutputStream closed
2024-12-07 01:24:06,396 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-07 01:24:06,396 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-07 01:24:06,397 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK
2024-12-07 01:24:06,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-07 01:24:06,467 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-07 01:24:06,467 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-07 01:24:06,468 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-07 01:24:06,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-07 01:24:06,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-07 01:24:06,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-07 01:24:06,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-07 01:24:06,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-07 01:24:06,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-07 01:24:06,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-07 01:24:06,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-07 01:24:06,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-07 01:24:06,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-07 01:24:06,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-07 01:24:06,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-07 01:24:06,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-07 01:24:06,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-07 01:24:06,474 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-07 01:24:06,475 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null
2024-12-07 01:24:06,475 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-07 01:24:06,475 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK.
2024-12-07T01:24:06,735 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c
2024-12-07 01:24:06,738 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-07 01:24:06,739 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-07T01:24:06,747 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-12-07T01:24:06,754 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-12-07T01:24:06,778 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T01:24:06,826 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T01:24:06,826 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T01:24:06,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T01:24:06,858 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315, deleteOnExit=true 2024-12-07T01:24:06,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T01:24:06,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/test.cache.data in system properties and HBase conf 2024-12-07T01:24:06,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T01:24:06,861 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir in system properties and HBase conf 2024-12-07T01:24:06,861 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T01:24:06,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T01:24:06,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T01:24:06,949 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T01:24:07,060 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T01:24:07,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T01:24:07,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T01:24:07,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T01:24:07,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T01:24:07,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T01:24:07,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T01:24:07,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T01:24:07,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T01:24:07,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T01:24:07,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/nfs.dump.dir in system properties and HBase conf 2024-12-07T01:24:07,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/java.io.tmpdir in system properties and HBase conf 2024-12-07T01:24:07,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T01:24:07,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T01:24:07,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T01:24:08,762 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T01:24:08,830 INFO [Time-limited test {}] log.Log(170): Logging initialized @3277ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T01:24:08,898 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T01:24:08,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T01:24:08,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T01:24:08,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T01:24:08,974 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T01:24:08,987 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T01:24:08,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,AVAILABLE} 2024-12-07T01:24:08,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T01:24:09,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/java.io.tmpdir/jetty-localhost-43833-hadoop-hdfs-3_4_1-tests_jar-_-any-2070976439561689566/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T01:24:09,160 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:43833} 2024-12-07T01:24:09,160 INFO [Time-limited test {}] server.Server(415): Started @3607ms 2024-12-07T01:24:10,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T01:24:10,738 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T01:24:10,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T01:24:10,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T01:24:10,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T01:24:10,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,AVAILABLE} 2024-12-07T01:24:10,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T01:24:10,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/java.io.tmpdir/jetty-localhost-35303-hadoop-hdfs-3_4_1-tests_jar-_-any-12920558310656546117/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T01:24:10,838 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:35303} 2024-12-07T01:24:10,838 INFO [Time-limited test {}] server.Server(415): Started @5285ms 2024-12-07T01:24:10,885 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T01:24:11,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T01:24:11,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T01:24:11,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T01:24:11,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T01:24:11,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T01:24:11,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,AVAILABLE} 2024-12-07T01:24:11,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T01:24:11,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/java.io.tmpdir/jetty-localhost-44617-hadoop-hdfs-3_4_1-tests_jar-_-any-11000553475244987791/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T01:24:11,152 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:44617} 2024-12-07T01:24:11,152 INFO [Time-limited test {}] server.Server(415): Started @5599ms 2024-12-07T01:24:11,155 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T01:24:11,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T01:24:11,222 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T01:24:11,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T01:24:11,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T01:24:11,225 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T01:24:11,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,AVAILABLE} 2024-12-07T01:24:11,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T01:24:11,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/java.io.tmpdir/jetty-localhost-45459-hadoop-hdfs-3_4_1-tests_jar-_-any-9230080253788216643/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T01:24:11,324 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:45459} 2024-12-07T01:24:11,324 INFO [Time-limited test {}] server.Server(415): Started @5771ms 2024-12-07T01:24:11,326 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T01:24:13,145 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data1/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,145 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data2/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,145 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data4/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,145 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data3/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,175 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T01:24:13,175 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T01:24:13,219 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data5/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,219 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data6/current/BP-1847206258-172.17.0.3-1733534647641/current, will proceed with Du for space computation calculation, 2024-12-07T01:24:13,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b1549ea03503e15 with lease ID 0xc4fd88915bf05edb: Processing first storage report for DS-226c85da-7fec-4c4f-9523-176d70b8f943 from datanode DatanodeRegistration(127.0.0.1:38509, datanodeUuid=cb8add91-94da-4995-a3f1-fe13c01d1ad0, infoPort=40597, infoSecurePort=0, ipcPort=36533, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,221 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b1549ea03503e15 with lease ID 0xc4fd88915bf05edb: from storage DS-226c85da-7fec-4c4f-9523-176d70b8f943 node DatanodeRegistration(127.0.0.1:38509, datanodeUuid=cb8add91-94da-4995-a3f1-fe13c01d1ad0, infoPort=40597, infoSecurePort=0, ipcPort=36533, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb9e240e835c9af7 with lease ID 0xc4fd88915bf05edc: Processing first storage report for DS-767c29ce-7be5-4668-b2c0-f46bb398d859 from datanode DatanodeRegistration(127.0.0.1:40681, datanodeUuid=3bfca5df-b42a-4eb5-9c04-fc996d72f988, infoPort=36997, infoSecurePort=0, ipcPort=35357, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb9e240e835c9af7 with lease ID 0xc4fd88915bf05edc: from storage DS-767c29ce-7be5-4668-b2c0-f46bb398d859 node DatanodeRegistration(127.0.0.1:40681, datanodeUuid=3bfca5df-b42a-4eb5-9c04-fc996d72f988, infoPort=36997, infoSecurePort=0, ipcPort=35357, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b1549ea03503e15 with lease ID 0xc4fd88915bf05edb: Processing first storage report for DS-1c322c32-1cdd-48f9-8530-ecdbe2bf32c9 from datanode DatanodeRegistration(127.0.0.1:38509, datanodeUuid=cb8add91-94da-4995-a3f1-fe13c01d1ad0, infoPort=40597, infoSecurePort=0, ipcPort=36533, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b1549ea03503e15 with lease ID 0xc4fd88915bf05edb: from storage DS-1c322c32-1cdd-48f9-8530-ecdbe2bf32c9 node DatanodeRegistration(127.0.0.1:38509, 
datanodeUuid=cb8add91-94da-4995-a3f1-fe13c01d1ad0, infoPort=40597, infoSecurePort=0, ipcPort=36533, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,223 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb9e240e835c9af7 with lease ID 0xc4fd88915bf05edc: Processing first storage report for DS-d1bb36ac-259f-4e57-9e15-4ce8f6927bf6 from datanode DatanodeRegistration(127.0.0.1:40681, datanodeUuid=3bfca5df-b42a-4eb5-9c04-fc996d72f988, infoPort=36997, infoSecurePort=0, ipcPort=35357, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,223 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb9e240e835c9af7 with lease ID 0xc4fd88915bf05edc: from storage DS-d1bb36ac-259f-4e57-9e15-4ce8f6927bf6 node DatanodeRegistration(127.0.0.1:40681, datanodeUuid=3bfca5df-b42a-4eb5-9c04-fc996d72f988, infoPort=36997, infoSecurePort=0, ipcPort=35357, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,241 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T01:24:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x926b8d04c0a005fc with lease ID 0xc4fd88915bf05edd: Processing first storage report for DS-c552b59d-4933-4b9c-acce-bfbec98f86fe from datanode DatanodeRegistration(127.0.0.1:35777, datanodeUuid=12105a20-257c-43a8-82d6-b8265e6e70c2, infoPort=41487, infoSecurePort=0, ipcPort=40131, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x926b8d04c0a005fc with lease ID 0xc4fd88915bf05edd: from storage DS-c552b59d-4933-4b9c-acce-bfbec98f86fe node DatanodeRegistration(127.0.0.1:35777, datanodeUuid=12105a20-257c-43a8-82d6-b8265e6e70c2, infoPort=41487, infoSecurePort=0, ipcPort=40131, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x926b8d04c0a005fc with lease ID 0xc4fd88915bf05edd: Processing first storage report for DS-f30d5e0b-b95f-4caa-8426-7d9fc963c35a from datanode DatanodeRegistration(127.0.0.1:35777, datanodeUuid=12105a20-257c-43a8-82d6-b8265e6e70c2, infoPort=41487, infoSecurePort=0, ipcPort=40131, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641) 2024-12-07T01:24:13,247 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x926b8d04c0a005fc with lease ID 0xc4fd88915bf05edd: from storage DS-f30d5e0b-b95f-4caa-8426-7d9fc963c35a node DatanodeRegistration(127.0.0.1:35777, datanodeUuid=12105a20-257c-43a8-82d6-b8265e6e70c2, infoPort=41487, infoSecurePort=0, ipcPort=40131, storageInfo=lv=-57;cid=testClusterID;nsid=1996745715;c=1733534647641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T01:24:13,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c 2024-12-07T01:24:13,414 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/zookeeper_0, clientPort=59844, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T01:24:13,422 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59844 2024-12-07T01:24:13,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:13,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:13,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741825_1001 (size=7) 2024-12-07T01:24:13,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741825_1001 (size=7) 2024-12-07T01:24:13,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741825_1001 (size=7) 2024-12-07T01:24:14,046 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee with version=8 2024-12-07T01:24:14,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/hbase-staging 2024-12-07T01:24:14,391 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ec1863dc21e5:0 server-side Connection retries=45 2024-12-07T01:24:14,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:14,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:14,403 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T01:24:14,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=3 2024-12-07T01:24:14,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T01:24:14,523 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T01:24:14,577 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T01:24:14,586 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T01:24:14,590 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T01:24:14,613 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 70241 (auto-detected) 2024-12-07T01:24:14,614 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-07T01:24:14,629 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40763 2024-12-07T01:24:14,647 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40763 connecting to ZooKeeper ensemble=127.0.0.1:59844 2024-12-07T01:24:15,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:407630x0, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T01:24:15,318 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40763-0x101ad6397820000 connected 2024-12-07T01:24:15,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:15,493 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee, hbase.cluster.distributed=false 2024-12-07T01:24:15,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T01:24:15,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40763 2024-12-07T01:24:15,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40763 2024-12-07T01:24:15,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40763 2024-12-07T01:24:15,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40763 2024-12-07T01:24:15,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40763 2024-12-07T01:24:15,605 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ec1863dc21e5:0 server-side Connection retries=45 2024-12-07T01:24:15,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,607 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T01:24:15,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T01:24:15,610 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T01:24:15,612 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T01:24:15,613 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45471 2024-12-07T01:24:15,615 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45471 connecting to ZooKeeper ensemble=127.0.0.1:59844 2024-12-07T01:24:15,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454710x0, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T01:24:15,712 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45471-0x101ad6397820001 connected 2024-12-07T01:24:15,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:15,719 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T01:24:15,728 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T01:24:15,730 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T01:24:15,735 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T01:24:15,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45471 2024-12-07T01:24:15,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45471 2024-12-07T01:24:15,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45471 2024-12-07T01:24:15,739 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45471 2024-12-07T01:24:15,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45471 2024-12-07T01:24:15,753 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ec1863dc21e5:0 server-side Connection retries=45 2024-12-07T01:24:15,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,754 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T01:24:15,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T01:24:15,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T01:24:15,755 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T01:24:15,756 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45349 2024-12-07T01:24:15,757 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45349 connecting to ZooKeeper ensemble=127.0.0.1:59844 2024-12-07T01:24:15,758 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-12-07T01:24:15,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453490x0, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T01:24:15,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45349-0x101ad6397820002 connected 2024-12-07T01:24:15,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:15,828 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T01:24:15,829 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T01:24:15,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T01:24:15,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T01:24:15,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45349 2024-12-07T01:24:15,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45349 2024-12-07T01:24:15,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45349 2024-12-07T01:24:15,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45349 2024-12-07T01:24:15,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45349 2024-12-07T01:24:15,855 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ec1863dc21e5:0 server-side Connection retries=45 2024-12-07T01:24:15,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,855 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T01:24:15,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T01:24:15,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T01:24:15,856 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, 
hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T01:24:15,856 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T01:24:15,857 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42825 2024-12-07T01:24:15,859 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42825 connecting to ZooKeeper ensemble=127.0.0.1:59844 2024-12-07T01:24:15,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:15,930 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428250x0, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T01:24:15,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428250x0, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:15,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42825-0x101ad6397820003 connected 2024-12-07T01:24:15,932 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T01:24:15,934 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T01:24:15,935 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T01:24:15,939 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T01:24:15,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-07T01:24:15,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42825 2024-12-07T01:24:15,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42825 2024-12-07T01:24:15,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-07T01:24:15,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-07T01:24:15,961 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ec1863dc21e5:40763 2024-12-07T01:24:15,962 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:16,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,016 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,020 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:16,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T01:24:16,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T01:24:16,110 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T01:24:16,110 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,113 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T01:24:16,115 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ec1863dc21e5,40763,1733534654247 from backup master directory 2024-12-07T01:24:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:16,162 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T01:24:16,163 WARN [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T01:24:16,164 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:16,166 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T01:24:16,168 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T01:24:16,219 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/hbase.id] with ID: 80ce8d20-b7d8-4cb9-81e8-162bac6b070d 2024-12-07T01:24:16,219 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/.tmp/hbase.id 2024-12-07T01:24:16,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741826_1002 (size=42) 2024-12-07T01:24:16,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741826_1002 (size=42) 2024-12-07T01:24:16,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741826_1002 (size=42) 2024-12-07T01:24:16,235 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/.tmp/hbase.id]:[hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/hbase.id] 2024-12-07T01:24:16,279 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T01:24:16,284 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T01:24:16,301 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-07T01:24:16,362 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:16,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741827_1003 (size=196) 2024-12-07T01:24:16,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741827_1003 (size=196) 2024-12-07T01:24:16,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741827_1003 (size=196) 2024-12-07T01:24:16,399 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T01:24:16,400 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, 
flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T01:24:16,406 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:16,429 WARN [IPC Server handler 3 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,429 WARN [IPC Server handler 3 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:16,429 WARN [IPC Server handler 3 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741828_1004 (size=1189) 2024-12-07T01:24:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741828_1004 (size=1189) 2024-12-07T01:24:16,452 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store 2024-12-07T01:24:16,464 WARN [IPC Server handler 1 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,465 WARN [IPC Server handler 1 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:16,465 WARN [IPC Server handler 1 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:16,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741829_1005 (size=34) 2024-12-07T01:24:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741829_1005 (size=34) 2024-12-07T01:24:16,476 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T01:24:16,479 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:16,481 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T01:24:16,481 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:16,481 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:16,482 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T01:24:16,483 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T01:24:16,483 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:16,484 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733534656481Disabling compacts and flushes for region at 1733534656481Disabling writes for close at 1733534656482 (+1 ms)Writing region close event to WAL at 1733534656483 (+1 ms)Closed at 1733534656483 2024-12-07T01:24:16,486 WARN [master/ec1863dc21e5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/.initializing 2024-12-07T01:24:16,487 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/WALs/ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:16,494 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:16,508 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ec1863dc21e5%2C40763%2C1733534654247, suffix=, logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/WALs/ec1863dc21e5,40763,1733534654247, archiveDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/oldWALs, maxLogs=10 2024-12-07T01:24:16,534 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/WALs/ec1863dc21e5,40763,1733534654247/ec1863dc21e5%2C40763%2C1733534654247.1733534656512, exclude list is [], retry=0 2024-12-07T01:24:16,537 WARN [IPC Server handler 4 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,537 WARN [IPC Server handler 4 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:16,537 WARN [IPC Server handler 4 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:16,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
	at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T01:24:16,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK]
2024-12-07T01:24:16,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK]
2024-12-07T01:24:16,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-07T01:24:16,599 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/WALs/ec1863dc21e5,40763,1733534654247/ec1863dc21e5%2C40763%2C1733534654247.1733534656512
2024-12-07T01:24:16,601 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)]
2024-12-07T01:24:16,602 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-07T01:24:16,603 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-07T01:24:16,606 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-07T01:24:16,606 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-07T01:24:16,637 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-07T01:24:16,638 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still
in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,638 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:16,638 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:16,639 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,639 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:16,639 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:16,639 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:16,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T01:24:16,662 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:16,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:16,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,668 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T01:24:16,668 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:16,669 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:16,669 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T01:24:16,672 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:16,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:16,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T01:24:16,676 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:16,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:16,677 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,680 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,682 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 
2024-12-07T01:24:16,686 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,687 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,690 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T01:24:16,692 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T01:24:16,697 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:16,698 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61520321, jitterRate=-0.08327578008174896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T01:24:16,703 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733534656617Initializing all the Stores at 1733534656619 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534656620 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534656620Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534656621 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534656621Cleaning up temporary data from old regions at 1733534656687 (+66 ms)Region opened successfully at 1733534656703 (+16 ms) 2024-12-07T01:24:16,704 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, 
compactMin=4 2024-12-07T01:24:16,732 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@601e9ed5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ec1863dc21e5/172.17.0.3:0 2024-12-07T01:24:16,757 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T01:24:16,767 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T01:24:16,767 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T01:24:16,770 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T01:24:16,771 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T01:24:16,776 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-07T01:24:16,776 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T01:24:16,798 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-07T01:24:16,806 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T01:24:17,058 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T01:24:17,064 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T01:24:17,066 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T01:24:17,079 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T01:24:17,083 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T01:24:17,087 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T01:24:17,099 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T01:24:17,101 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T01:24:17,110 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T01:24:17,134 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T01:24:17,142 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T01:24:17,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:17,193 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:17,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:17,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-07T01:24:17,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,194 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,199 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ec1863dc21e5,40763,1733534654247, sessionid=0x101ad6397820000, setting cluster-up flag (Was=false) 2024-12-07T01:24:17,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,437 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,468 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T01:24:17,471 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:17,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,489 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:17,521 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T01:24:17,523 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:17,531 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T01:24:17,548 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(746): ClusterId : 80ce8d20-b7d8-4cb9-81e8-162bac6b070d 2024-12-07T01:24:17,548 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(746): ClusterId : 80ce8d20-b7d8-4cb9-81e8-162bac6b070d 2024-12-07T01:24:17,548 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(746): ClusterId : 80ce8d20-b7d8-4cb9-81e8-162bac6b070d 2024-12-07T01:24:17,550 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T01:24:17,550 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T01:24:17,550 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T01:24:17,565 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T01:24:17,565 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T01:24:17,565 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T01:24:17,565 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T01:24:17,565 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T01:24:17,565 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T01:24:17,606 INFO [AsyncFSWAL-0-hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData-prefix:ec1863dc21e5,40763,1733534654247 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-12-07T01:24:17,615 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T01:24:17,615 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T01:24:17,615 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T01:24:17,616 DEBUG [RS:2;ec1863dc21e5:42825 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@df2fdce, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ec1863dc21e5/172.17.0.3:0 2024-12-07T01:24:17,616 DEBUG [RS:1;ec1863dc21e5:45349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@552db18e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ec1863dc21e5/172.17.0.3:0 2024-12-07T01:24:17,616 DEBUG [RS:0;ec1863dc21e5:45471 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@209ba08f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ec1863dc21e5/172.17.0.3:0 2024-12-07T01:24:17,618 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T01:24:17,627 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T01:24:17,629 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ec1863dc21e5:45349 2024-12-07T01:24:17,630 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ec1863dc21e5:42825 2024-12-07T01:24:17,631 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T01:24:17,631 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T01:24:17,631 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T01:24:17,631 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T01:24:17,632 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T01:24:17,632 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T01:24:17,633 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T01:24:17,634 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(2659): reportForDuty to master=ec1863dc21e5,40763,1733534654247 with port=42825, startcode=1733534655854 2024-12-07T01:24:17,634 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(2659): reportForDuty to master=ec1863dc21e5,40763,1733534654247 with port=45349, startcode=1733534655753 2024-12-07T01:24:17,634 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ec1863dc21e5:45471 2024-12-07T01:24:17,634 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T01:24:17,634 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T01:24:17,635 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T01:24:17,636 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(2659): reportForDuty to master=ec1863dc21e5,40763,1733534654247 with port=45471, startcode=1733534655575 2024-12-07T01:24:17,639 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ec1863dc21e5,40763,1733534654247 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T01:24:17,645 DEBUG [RS:1;ec1863dc21e5:45349 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T01:24:17,645 DEBUG [RS:0;ec1863dc21e5:45471 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T01:24:17,645 DEBUG [RS:2;ec1863dc21e5:42825 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ec1863dc21e5:0, corePoolSize=5, maxPoolSize=5 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ec1863dc21e5:0, corePoolSize=5, maxPoolSize=5 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ec1863dc21e5:0, corePoolSize=5, maxPoolSize=5 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ec1863dc21e5:0, corePoolSize=5, maxPoolSize=5 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ec1863dc21e5:0, corePoolSize=10, maxPoolSize=10 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ec1863dc21e5:0, corePoolSize=2, 
maxPoolSize=2 2024-12-07T01:24:17,646 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:17,651 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733534687651 2024-12-07T01:24:17,652 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T01:24:17,652 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T01:24:17,652 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T01:24:17,653 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T01:24:17,657 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T01:24:17,657 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T01:24:17,658 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T01:24:17,658 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:17,658 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T01:24:17,658 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T01:24:17,659 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:17,664 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T01:24:17,666 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T01:24:17,666 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T01:24:17,672 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T01:24:17,673 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T01:24:17,678 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.large.0-1733534657674,5,FailOnTimeoutGroup] 2024-12-07T01:24:17,679 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.small.0-1733534657678,5,FailOnTimeoutGroup] 2024-12-07T01:24:17,679 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:17,679 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T01:24:17,680 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:17,680 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
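The HMaster line above notes that reopening regions with very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of supplying that property programmatically (in a real deployment it would normally go into hbase-site.xml; the threshold value 3 is only an example):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfig {
  public static void main(String[] args) {
    // Start from the standard HBase configuration (hbase-site.xml etc.)
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the "reopen regions with very high
    // storeFileRefCount" recovery behaviour mentioned in the log above.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
  }
}
```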
2024-12-07T01:24:17,687 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41287, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T01:24:17,687 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51417, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T01:24:17,687 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T01:24:17,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741831_1007 (size=1321) 2024-12-07T01:24:17,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741831_1007 (size=1321) 2024-12-07T01:24:17,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741831_1007 (size=1321) 2024-12-07T01:24:17,690 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T01:24:17,691 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee 2024-12-07T01:24:17,693 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:17,695 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(517): Registering regionserver=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741832_1008 (size=32) 2024-12-07T01:24:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741832_1008 (size=32) 2024-12-07T01:24:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741832_1008 (size=32) 2024-12-07T01:24:17,706 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:17,708 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:17,708 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(517): Registering regionserver=ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:17,708 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T01:24:17,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T01:24:17,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:17,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:17,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T01:24:17,713 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee 2024-12-07T01:24:17,713 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42771 2024-12-07T01:24:17,713 
DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T01:24:17,714 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:17,714 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40763 {}] master.ServerManager(517): Registering regionserver=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:17,714 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee 2024-12-07T01:24:17,714 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42771 2024-12-07T01:24:17,714 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T01:24:17,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T01:24:17,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:17,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:17,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T01:24:17,718 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee 2024-12-07T01:24:17,718 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42771 2024-12-07T01:24:17,718 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T01:24:17,723 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T01:24:17,724 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:17,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:17,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T01:24:17,728 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T01:24:17,728 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:17,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:17,729 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T01:24:17,731 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740 2024-12-07T01:24:17,732 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740 2024-12-07T01:24:17,734 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T01:24:17,734 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T01:24:17,735 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
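The FlushLargeStoresPolicy line above spells out the fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set in the hbase:meta descriptor, the per-family lower bound becomes the region's memstore flush size divided by the number of column families. A quick check of the number it reports, assuming the default 128 MB flush size for this test setup and the four meta families info, ns, rep_barrier and table:

```java
public class PerFamilyFlushBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // assumed region flush size (128 MB)
    int columnFamilies = 4;                      // info, ns, rep_barrier, table
    long lowerBound = memstoreFlushSize / columnFamilies;
    // Prints 33554432, i.e. the "32.0 M" fallback reported in the log.
    System.out.println(lowerBound);
  }
}
```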
2024-12-07T01:24:17,738 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T01:24:17,742 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:17,743 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64901400, jitterRate=-0.03289377689361572}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T01:24:17,745 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733534657706Initializing all the Stores at 1733534657708 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534657708Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534657708Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534657708Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534657708Cleaning up temporary data from old regions at 1733534657734 (+26 ms)Region opened successfully at 1733534657745 (+11 ms) 2024-12-07T01:24:17,745 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T01:24:17,745 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T01:24:17,745 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T01:24:17,745 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T01:24:17,745 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T01:24:17,747 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T01:24:17,747 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733534657745Disabling compacts and flushes for region at 1733534657745Disabling writes for close at 1733534657745Writing 
region close event to WAL at 1733534657746 (+1 ms)Closed at 1733534657747 (+1 ms) 2024-12-07T01:24:17,750 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T01:24:17,750 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T01:24:17,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T01:24:17,765 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T01:24:17,768 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T01:24:17,919 WARN [ec1863dc21e5:40763 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T01:24:17,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T01:24:18,185 DEBUG [RS:1;ec1863dc21e5:45349 {}] zookeeper.ZKUtil(111): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:18,186 DEBUG [RS:2;ec1863dc21e5:42825 {}] zookeeper.ZKUtil(111): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:18,186 WARN [RS:2;ec1863dc21e5:42825 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T01:24:18,186 WARN [RS:1;ec1863dc21e5:45349 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
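The ZNodeClearer warnings above fire because HBASE_ZNODE_FILE is not exported in this test environment; with it set, the start scripts can remove a crashed server's ephemeral znode early and shorten MTTR. A rough sketch of the kind of check the warning refers to (the written value and file handling are assumptions for illustration, not the exact ZNodeClearer behaviour):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ZNodeFileSketch {
  public static void main(String[] args) throws IOException {
    String znodeFile = System.getenv("HBASE_ZNODE_FILE");
    if (znodeFile == null) {
      // Mirrors the WARN in the log: nothing is recorded, so the start
      // scripts cannot clear the znode after a crash.
      System.err.println("HBASE_ZNODE_FILE not set; znode will not be cleared on crash");
      return;
    }
    // Record this server's znode so an external script can delete it later.
    String myZnode = "/hbase/rs/ec1863dc21e5,45349,1733534655753"; // example taken from the log
    Files.writeString(Path.of(znodeFile), myZnode);
  }
}
```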
2024-12-07T01:24:18,187 INFO [RS:2;ec1863dc21e5:42825 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:18,187 INFO [RS:1;ec1863dc21e5:45349 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:18,187 DEBUG [RS:0;ec1863dc21e5:45471 {}] zookeeper.ZKUtil(111): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:18,187 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:18,188 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:18,188 WARN [RS:0;ec1863dc21e5:45471 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T01:24:18,188 INFO [RS:0;ec1863dc21e5:45471 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:18,188 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:18,191 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ec1863dc21e5,42825,1733534655854] 2024-12-07T01:24:18,191 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ec1863dc21e5,45349,1733534655753] 2024-12-07T01:24:18,191 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ec1863dc21e5,45471,1733534655575] 2024-12-07T01:24:18,216 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T01:24:18,216 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T01:24:18,216 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T01:24:18,229 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T01:24:18,229 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T01:24:18,229 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T01:24:18,235 INFO [RS:1;ec1863dc21e5:45349 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T01:24:18,235 INFO [RS:0;ec1863dc21e5:45471 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T01:24:18,235 INFO [RS:2;ec1863dc21e5:42825 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T01:24:18,235 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,235 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,235 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,237 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T01:24:18,237 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T01:24:18,237 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T01:24:18,243 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T01:24:18,243 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T01:24:18,243 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T01:24:18,244 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,244 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,244 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
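The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M. The low mark is simply the global limit scaled by a lower-limit fraction; with the values in the log that fraction works out to 0.95 (whether that is this cluster's configured value or just the default is an assumption):

```java
public class MemStoreLimitMath {
  public static void main(String[] args) {
    double globalLimitMb = 880.0;      // from the MemStoreFlusher log line
    double lowerLimitFraction = 0.95;  // assumed fraction; 836 / 880 = 0.95
    double lowMarkMb = globalLimitMb * lowerLimitFraction;
    // Prints 836.0, matching globalMemStoreLimitLowMark in the log.
    System.out.println(lowMarkMb);
  }
}
```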
2024-12-07T01:24:18,244 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,244 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0, corePoolSize=2, maxPoolSize=2 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0, corePoolSize=2, maxPoolSize=2 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0, corePoolSize=2, maxPoolSize=2 2024-12-07T01:24:18,245 DEBUG 
[RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ec1863dc21e5:0, corePoolSize=1, maxPoolSize=1 2024-12-07T01:24:18,245 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,245 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,245 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,246 DEBUG [RS:2;ec1863dc21e5:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,246 DEBUG [RS:0;ec1863dc21e5:45471 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,246 DEBUG [RS:1;ec1863dc21e5:45349 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ec1863dc21e5:0, corePoolSize=3, maxPoolSize=3 2024-12-07T01:24:18,248 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,248 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,45471,1733534655575-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
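The executor.ExecutorService lines above start one pool per task type (RS_OPEN_REGION, RS_LOG_REPLAY_OPS, RS_SNAPSHOT_OPERATIONS, and so on), each with a corePoolSize and maxPoolSize. A plain-JDK sketch of what those two numbers mean for such a pool (the queue choice and keep-alive here are illustrative, not HBase's actual executor implementation):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPoolSketch {
  public static void main(String[] args) {
    // e.g. RS_SNAPSHOT_OPERATIONS with corePoolSize=3, maxPoolSize=3
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        3,                     // corePoolSize: workers kept even when idle
        3,                     // maxPoolSize: upper bound on worker threads
        60L, TimeUnit.SECONDS, // keep-alive for threads above the core size
        new LinkedBlockingQueue<>());
    pool.execute(() -> System.out.println("snapshot subtask running"));
    pool.shutdown();
  }
}
```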
2024-12-07T01:24:18,249 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,42825,1733534655854-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T01:24:18,249 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,45349,1733534655753-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T01:24:18,266 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T01:24:18,266 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T01:24:18,266 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T01:24:18,268 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,45471,1733534655575-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,45349,1733534655753-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,42825,1733534655854-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,268 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.Replication(171): ec1863dc21e5,45349,1733534655753 started 2024-12-07T01:24:18,268 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.Replication(171): ec1863dc21e5,45471,1733534655575 started 2024-12-07T01:24:18,268 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.Replication(171): ec1863dc21e5,42825,1733534655854 started 2024-12-07T01:24:18,284 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T01:24:18,284 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1482): Serving as ec1863dc21e5,42825,1733534655854, RpcServer on ec1863dc21e5/172.17.0.3:42825, sessionid=0x101ad6397820003 2024-12-07T01:24:18,285 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T01:24:18,285 DEBUG [RS:2;ec1863dc21e5:42825 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:18,285 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,42825,1733534655854' 2024-12-07T01:24:18,286 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,42825,1733534655854' 2024-12-07T01:24:18,287 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T01:24:18,288 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T01:24:18,289 DEBUG [RS:2;ec1863dc21e5:42825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T01:24:18,289 INFO [RS:2;ec1863dc21e5:42825 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T01:24:18,289 INFO [RS:2;ec1863dc21e5:42825 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T01:24:18,291 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:18,291 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
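Each region server above starts its procedure member, checks the abort znodes and then watches /hbase/flush-table-proc/acquired and /hbase/online-snapshot/acquired for new procedures. A small sketch of peeking at one of those znodes with a plain ZooKeeper client against the quorum shown in the log (connection handling is simplified; in practice you would wait for the connected event before reading):

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZnodePeek {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log; no watcher logic registered here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59844", 30000, event -> { });
    try {
      List<String> pending = zk.getChildren("/hbase/online-snapshot/acquired", false);
      System.out.println("pending snapshot procedures: " + pending);
    } finally {
      zk.close();
    }
  }
}
```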
2024-12-07T01:24:18,291 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1482): Serving as ec1863dc21e5,45471,1733534655575, RpcServer on ec1863dc21e5/172.17.0.3:45471, sessionid=0x101ad6397820001 2024-12-07T01:24:18,291 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1482): Serving as ec1863dc21e5,45349,1733534655753, RpcServer on ec1863dc21e5/172.17.0.3:45349, sessionid=0x101ad6397820002 2024-12-07T01:24:18,291 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T01:24:18,291 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T01:24:18,291 DEBUG [RS:1;ec1863dc21e5:45349 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:18,291 DEBUG [RS:0;ec1863dc21e5:45471 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:18,291 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,45471,1733534655575' 2024-12-07T01:24:18,291 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,45349,1733534655753' 2024-12-07T01:24:18,291 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T01:24:18,291 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T01:24:18,292 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T01:24:18,292 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:18,292 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,45349,1733534655753' 2024-12-07T01:24:18,292 DEBUG [RS:0;ec1863dc21e5:45471 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:18,292 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T01:24:18,293 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ec1863dc21e5,45471,1733534655575' 2024-12-07T01:24:18,293 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T01:24:18,293 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T01:24:18,293 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T01:24:18,293 DEBUG [RS:1;ec1863dc21e5:45349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T01:24:18,293 INFO [RS:1;ec1863dc21e5:45349 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T01:24:18,294 DEBUG [RS:0;ec1863dc21e5:45471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T01:24:18,294 INFO [RS:1;ec1863dc21e5:45349 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T01:24:18,294 INFO [RS:0;ec1863dc21e5:45471 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T01:24:18,294 INFO [RS:0;ec1863dc21e5:45471 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T01:24:18,395 INFO [RS:2;ec1863dc21e5:42825 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:18,395 INFO [RS:0;ec1863dc21e5:45471 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:18,396 INFO [RS:1;ec1863dc21e5:45349 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:18,401 INFO [RS:0;ec1863dc21e5:45471 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ec1863dc21e5%2C45471%2C1733534655575, suffix=, logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45471,1733534655575, archiveDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs, maxLogs=32 2024-12-07T01:24:18,401 INFO [RS:1;ec1863dc21e5:45349 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ec1863dc21e5%2C45349%2C1733534655753, suffix=, logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753, archiveDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs, maxLogs=32 2024-12-07T01:24:18,401 INFO [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ec1863dc21e5%2C42825%2C1733534655854, suffix=, logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854, archiveDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs, maxLogs=32 2024-12-07T01:24:18,426 DEBUG [RS:2;ec1863dc21e5:42825 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854/ec1863dc21e5%2C42825%2C1733534655854.1733534658407, exclude list is [], retry=0 2024-12-07T01:24:18,426 DEBUG [RS:0;ec1863dc21e5:45471 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45471,1733534655575/ec1863dc21e5%2C45471%2C1733534655575.1733534658407, exclude 
list is [], retry=0 2024-12-07T01:24:18,426 DEBUG [RS:1;ec1863dc21e5:45349 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, exclude list is [], retry=0 2024-12-07T01:24:18,428 WARN [IPC Server handler 1 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:18,428 WARN [IPC Server handler 2 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:18,428 WARN [IPC Server handler 1 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:18,428 WARN [IPC Server handler 0 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T01:24:18,429 WARN [IPC Server handler 1 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:18,429 WARN [IPC Server handler 2 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T01:24:18,429 WARN [IPC Server handler 0 on default port 42771 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 
2024-12-07T01:24:18,429 WARN [IPC Server handler 2 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:18,429 WARN [IPC Server handler 0 on default port 42771 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T01:24:18,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:18,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:18,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:18,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:18,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:18,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:18,461 INFO [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854/ec1863dc21e5%2C42825%2C1733534655854.1733534658407 2024-12-07T01:24:18,462 DEBUG [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:18,462 INFO [RS:0;ec1863dc21e5:45471 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45471,1733534655575/ec1863dc21e5%2C45471%2C1733534655575.1733534658407 2024-12-07T01:24:18,463 DEBUG [RS:0;ec1863dc21e5:45471 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:18,464 INFO [RS:1;ec1863dc21e5:45349 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 2024-12-07T01:24:18,464 DEBUG [RS:1;ec1863dc21e5:45349 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:18,674 DEBUG [ec1863dc21e5:40763 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T01:24:18,684 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(204): Hosts are {ec1863dc21e5=0} racks are {/default-rack=0} 2024-12-07T01:24:18,691 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T01:24:18,691 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T01:24:18,691 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T01:24:18,692 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T01:24:18,692 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T01:24:18,692 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T01:24:18,692 INFO [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T01:24:18,692 INFO [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T01:24:18,692 INFO [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T01:24:18,692 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T01:24:18,700 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:18,704 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ec1863dc21e5,42825,1733534655854, state=OPENING 2024-12-07T01:24:18,900 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T01:24:18,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:18,910 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:18,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:18,910 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:18,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:18,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:18,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:18,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:18,915 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T01:24:18,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ec1863dc21e5,42825,1733534655854}] 2024-12-07T01:24:19,099 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T01:24:19,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57413, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T01:24:19,113 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T01:24:19,113 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:19,114 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T01:24:19,117 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ec1863dc21e5%2C42825%2C1733534655854.meta, suffix=.meta, logDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854, archiveDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs, maxLogs=32 2024-12-07T01:24:19,131 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854/ec1863dc21e5%2C42825%2C1733534655854.meta.1733534659118.meta, exclude list is [], retry=0 2024-12-07T01:24:19,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 
2024-12-07T01:24:19,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:19,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:19,138 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,42825,1733534655854/ec1863dc21e5%2C42825%2C1733534655854.meta.1733534659118.meta 2024-12-07T01:24:19,139 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:19,139 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:19,140 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T01:24:19,143 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T01:24:19,147 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T01:24:19,150 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T01:24:19,151 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:19,151 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T01:24:19,151 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T01:24:19,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T01:24:19,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T01:24:19,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:19,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:19,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T01:24:19,158 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T01:24:19,158 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:19,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:19,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T01:24:19,160 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T01:24:19,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:19,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T01:24:19,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T01:24:19,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T01:24:19,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:19,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T01:24:19,163 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T01:24:19,165 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740 2024-12-07T01:24:19,168 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740 2024-12-07T01:24:19,171 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T01:24:19,172 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T01:24:19,172 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T01:24:19,175 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T01:24:19,177 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63514598, jitterRate=-0.05355873703956604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T01:24:19,177 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T01:24:19,178 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733534659152Writing region info on filesystem at 1733534659152Initializing all the Stores at 1733534659154 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534659154Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534659154Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534659154Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733534659154Cleaning up temporary data from old regions at 1733534659172 (+18 ms)Running coprocessor post-open hooks at 1733534659177 (+5 ms)Region opened successfully at 1733534659178 (+1 ms) 2024-12-07T01:24:19,185 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733534659089 2024-12-07T01:24:19,196 DEBUG [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T01:24:19,197 INFO [RS_OPEN_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T01:24:19,199 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:19,201 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ec1863dc21e5,42825,1733534655854, state=OPEN 2024-12-07T01:24:19,215 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T01:24:19,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T01:24:19,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T01:24:19,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T01:24:19,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:19,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:19,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:19,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T01:24:19,216 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:19,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T01:24:19,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ec1863dc21e5,42825,1733534655854 in 298 msec 2024-12-07T01:24:19,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T01:24:19,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.4680 sec 2024-12-07T01:24:19,232 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T01:24:19,232 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T01:24:19,256 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T01:24:19,257 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ec1863dc21e5,42825,1733534655854, seqNum=-1] 2024-12-07T01:24:19,276 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T01:24:19,278 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34003, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T01:24:19,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.7320 sec 2024-12-07T01:24:19,298 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733534659297, completionTime=-1 2024-12-07T01:24:19,300 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T01:24:19,300 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T01:24:19,345 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T01:24:19,345 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733534719345 2024-12-07T01:24:19,345 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733534779345 2024-12-07T01:24:19,345 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 45 msec 2024-12-07T01:24:19,347 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T01:24:19,353 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,353 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,353 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,355 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ec1863dc21e5:40763, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,355 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,356 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,364 DEBUG [master/ec1863dc21e5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T01:24:19,387 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.223sec 2024-12-07T01:24:19,388 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T01:24:19,389 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T01:24:19,390 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T01:24:19,390 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T01:24:19,390 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T01:24:19,391 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T01:24:19,391 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T01:24:19,396 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T01:24:19,397 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T01:24:19,397 INFO [master/ec1863dc21e5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ec1863dc21e5,40763,1733534654247-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:19,464 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ed908a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T01:24:19,465 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ec1863dc21e5,40763,-1 for getting cluster id 2024-12-07T01:24:19,467 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T01:24:19,475 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '80ce8d20-b7d8-4cb9-81e8-162bac6b070d' 2024-12-07T01:24:19,477 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T01:24:19,477 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "80ce8d20-b7d8-4cb9-81e8-162bac6b070d" 2024-12-07T01:24:19,478 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35031db1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T01:24:19,478 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ec1863dc21e5,40763,-1] 2024-12-07T01:24:19,480 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T01:24:19,482 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:19,483 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40620, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T01:24:19,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769aece, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T01:24:19,486 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T01:24:19,492 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ec1863dc21e5,42825,1733534655854, seqNum=-1] 2024-12-07T01:24:19,493 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T01:24:19,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T01:24:19,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:19,514 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:42771/hbase 2024-12-07T01:24:19,528 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=352, OpenFileDescriptor=581, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=8172 2024-12-07T01:24:19,543 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:19,546 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:19,547 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:19,551 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-82564006, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-82564006, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:19,564 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-82564006/hregion-82564006.1733534659552, exclude list is [], retry=0 2024-12-07T01:24:19,568 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:19,569 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:19,569 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:19,572 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-82564006/hregion-82564006.1733534659552 2024-12-07T01:24:19,573 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:19,573 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => b986db061dc11270013d5c22b3b72966, NAME => 'testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741838_1014 (size=64) 2024-12-07T01:24:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741838_1014 (size=64) 2024-12-07T01:24:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741838_1014 (size=64) 2024-12-07T01:24:19,989 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:19,992 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:19,995 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b986db061dc11270013d5c22b3b72966 columnFamilyName a 2024-12-07T01:24:19,995 DEBUG [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:19,997 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] regionserver.HStore(327): Store=b986db061dc11270013d5c22b3b72966/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:19,997 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:19,999 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,000 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,001 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,001 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,004 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,008 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:20,009 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened b986db061dc11270013d5c22b3b72966; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73938137, jitterRate=0.10176409780979156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:20,011 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for b986db061dc11270013d5c22b3b72966: Writing region info on filesystem at 1733534659989Initializing all the Stores at 1733534659992 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534659992Cleaning up temporary data from old regions at 1733534660001 (+9 ms)Region opened successfully at 1733534660011 (+10 ms) 2024-12-07T01:24:20,011 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing b986db061dc11270013d5c22b3b72966, disabling compactions & flushes 2024-12-07T01:24:20,011 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. 2024-12-07T01:24:20,011 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. 2024-12-07T01:24:20,011 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. after waiting 0 ms 2024-12-07T01:24:20,011 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. 2024-12-07T01:24:20,011 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. 
2024-12-07T01:24:20,012 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for b986db061dc11270013d5c22b3b72966: Waiting for close lock at 1733534660011Disabling compacts and flushes for region at 1733534660011Disabling writes for close at 1733534660011Writing region close event to WAL at 1733534660011Closed at 1733534660011 2024-12-07T01:24:20,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741837_1013 (size=95) 2024-12-07T01:24:20,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741837_1013 (size=95) 2024-12-07T01:24:20,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741837_1013 (size=95) 2024-12-07T01:24:20,022 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:20,023 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-82564006:(num 1733534659552) 2024-12-07T01:24:20,024 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T01:24:20,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741839_1015 (size=320) 2024-12-07T01:24:20,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741839_1015 (size=320) 2024-12-07T01:24:20,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741839_1015 (size=320) 2024-12-07T01:24:20,042 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T01:24:20,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741840_1016 (size=253) 2024-12-07T01:24:20,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741840_1016 (size=253) 2024-12-07T01:24:20,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741840_1016 (size=253) 2024-12-07T01:24:20,078 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1, size=320 (320bytes) 2024-12-07T01:24:20,078 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-07T01:24:20,078 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-07T01:24:20,079 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1 2024-12-07T01:24:20,083 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1 after 3ms 2024-12-07T01:24:20,088 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,089 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1 took 12ms 2024-12-07T01:24:20,098 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1 so closing down 2024-12-07T01:24:20,098 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:20,101 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-07T01:24:20,103 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp 2024-12-07T01:24:20,104 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:20,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741841_1017 (size=320) 2024-12-07T01:24:20,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741841_1017 (size=320) 2024-12-07T01:24:20,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741841_1017 (size=320) 2024-12-07T01:24:20,116 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:20,118 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002 2024-12-07T01:24:20,123 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 29 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-07T01:24:20,123 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1, journal: Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1, size=320 (320bytes) at 1733534660078Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1 so closing down at 1733534660098 (+20 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp at 1733534660103 (+5 ms)3 split writer threads finished at 1733534660104 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733534660116 (+12 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002 at 1733534660119 (+3 ms)Processed 2 edits across 1 Regions in 29 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733534660123 (+4 ms) 2024-12-07T01:24:20,142 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2, size=253 (253bytes) 2024-12-07T01:24:20,142 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2 2024-12-07T01:24:20,143 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2 after 1ms 2024-12-07T01:24:20,147 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,147 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2 took 5ms 2024-12-07T01:24:20,149 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2 so closing down 2024-12-07T01:24:20,149 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:20,152 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-07T01:24:20,153 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002-wal-2.temp 2024-12-07T01:24:20,154 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:20,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741842_1018 (size=253) 2024-12-07T01:24:20,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741842_1018 (size=253) 2024-12-07T01:24:20,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741842_1018 (size=253) 2024-12-07T01:24:20,163 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:20,168 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,171 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-12-07T01:24:20,173 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 26 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-07T01:24:20,174 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2, journal: Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2, size=253 (253bytes) at 1733534660142Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2 so closing down at 1733534660149 (+7 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002-wal-2.temp at 1733534660153 (+4 ms)3 split writer threads finished at 1733534660154 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733534660163 (+9 ms)Processed 1 edits across 1 Regions in 26 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733534660173 (+10 ms) 2024-12-07T01:24:20,174 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:20,176 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:20,188 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal.1733534660177, exclude list is [], retry=0 2024-12-07T01:24:20,193 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:20,193 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:20,194 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:20,196 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal.1733534660177 2024-12-07T01:24:20,196 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:20,196 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => b986db061dc11270013d5c22b3b72966, NAME => 'testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:20,197 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:20,197 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,197 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,199 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,201 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b986db061dc11270013d5c22b3b72966 columnFamilyName a 2024-12-07T01:24:20,201 DEBUG [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,202 INFO [StoreOpener-b986db061dc11270013d5c22b3b72966-1 {}] regionserver.HStore(327): Store=b986db061dc11270013d5c22b3b72966/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,202 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,203 DEBUG [Time-limited test 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,205 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,206 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002 2024-12-07T01:24:20,210 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,215 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002 2024-12-07T01:24:20,218 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b986db061dc11270013d5c22b3b72966 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-07T01:24:20,265 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/.tmp/a/c2b8fc8df79d4e03b5d08658ee533385 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733534660023/Put/seqid=0 2024-12-07T01:24:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741844_1020 (size=5170) 2024-12-07T01:24:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741844_1020 (size=5170) 2024-12-07T01:24:20,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741844_1020 (size=5170) 2024-12-07T01:24:20,280 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/.tmp/a/c2b8fc8df79d4e03b5d08658ee533385 2024-12-07T01:24:20,321 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/.tmp/a/c2b8fc8df79d4e03b5d08658ee533385 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/a/c2b8fc8df79d4e03b5d08658ee533385 2024-12-07T01:24:20,331 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/a/c2b8fc8df79d4e03b5d08658ee533385, entries=2, sequenceid=2, filesize=5.0 K 2024-12-07T01:24:20,336 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for b986db061dc11270013d5c22b3b72966 in 118ms, sequenceid=2, compaction requested=false; 
wal=null 2024-12-07T01:24:20,338 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/0000000000000000002 2024-12-07T01:24:20,339 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,339 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,342 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for b986db061dc11270013d5c22b3b72966 2024-12-07T01:24:20,345 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/b986db061dc11270013d5c22b3b72966/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-07T01:24:20,347 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened b986db061dc11270013d5c22b3b72966; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68001895, jitterRate=0.013307198882102966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:20,347 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for b986db061dc11270013d5c22b3b72966: Writing region info on filesystem at 1733534660197Initializing all the Stores at 1733534660198 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660198Obtaining lock to block concurrent updates at 1733534660218 (+20 ms)Preparing flush snapshotting stores in b986db061dc11270013d5c22b3b72966 at 1733534660218Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733534660221 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733534659544.b986db061dc11270013d5c22b3b72966. 
at 1733534660221Flushing b986db061dc11270013d5c22b3b72966/a: creating writer at 1733534660222 (+1 ms)Flushing b986db061dc11270013d5c22b3b72966/a: appending metadata at 1733534660255 (+33 ms)Flushing b986db061dc11270013d5c22b3b72966/a: closing flushed file at 1733534660258 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d6e930: reopening flushed file at 1733534660320 (+62 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for b986db061dc11270013d5c22b3b72966 in 118ms, sequenceid=2, compaction requested=false; wal=null at 1733534660336 (+16 ms)Cleaning up temporary data from old regions at 1733534660339 (+3 ms)Region opened successfully at 1733534660347 (+8 ms) 2024-12-07T01:24:20,370 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=365 (was 352) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36636 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36500 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36626 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35118 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35246 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:42771/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36532 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=677 (was 581) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=225 (was 218) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8158 (was 8172) 2024-12-07T01:24:20,382 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=365, OpenFileDescriptor=677, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=8158 2024-12-07T01:24:20,395 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:20,397 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:20,398 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:20,401 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-79887031, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-79887031, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:20,413 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-79887031/hregion-79887031.1733534660402, exclude list is [], retry=0 2024-12-07T01:24:20,416 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:20,417 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:20,417 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:20,419 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-79887031/hregion-79887031.1733534660402 2024-12-07T01:24:20,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:20,420 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 8ba6006ffa8d353c6daecf47951f5611, NAME => 'testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:20,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741846_1022 (size=64) 2024-12-07T01:24:20,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741846_1022 (size=64) 2024-12-07T01:24:20,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741846_1022 (size=64) 2024-12-07T01:24:20,434 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:20,437 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,439 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8ba6006ffa8d353c6daecf47951f5611 columnFamilyName a 2024-12-07T01:24:20,439 DEBUG [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,440 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] regionserver.HStore(327): Store=8ba6006ffa8d353c6daecf47951f5611/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,440 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,441 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,442 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,442 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,442 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data 
for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,445 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,448 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:20,448 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8ba6006ffa8d353c6daecf47951f5611; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74293541, jitterRate=0.1070600301027298}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8ba6006ffa8d353c6daecf47951f5611: Writing region info on filesystem at 1733534660434Initializing all the Stores at 1733534660436 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660436Cleaning up temporary data from old regions at 1733534660442 (+6 ms)Region opened successfully at 1733534660449 (+7 ms) 2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8ba6006ffa8d353c6daecf47951f5611, disabling compactions & flushes 2024-12-07T01:24:20,449 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. 2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. 2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. after waiting 0 ms 2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. 2024-12-07T01:24:20,449 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. 
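Every WAL reader opened above reports hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ, which is what this value-compression test variant exercises. A minimal sketch of a configuration that would produce that combination, assuming the WAL compression keys used by recent HBase releases (hbase.regionserver.wal.enablecompression, hbase.regionserver.wal.tags.enablecompression, hbase.regionserver.wal.value.enablecompression, hbase.regionserver.wal.value.compression.type); these key names are not taken from this log and should be verified against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalValueCompressionConfig {
    public static Configuration create() {
        // Sketch only: key names assumed from recent HBase releases.
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);       // dictionary compression of WAL entries
        conf.setBoolean("hbase.regionserver.wal.tags.enablecompression", true);  // would show up as hasTagCompression=true
        conf.setBoolean("hbase.regionserver.wal.value.enablecompression", true); // would show up as hasValueCompression=true
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");         // would show up as valueCompressionType=GZ
        return conf;
    }
}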
2024-12-07T01:24:20,449 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8ba6006ffa8d353c6daecf47951f5611: Waiting for close lock at 1733534660449Disabling compacts and flushes for region at 1733534660449Disabling writes for close at 1733534660449Writing region close event to WAL at 1733534660449Closed at 1733534660449 2024-12-07T01:24:20,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741845_1021 (size=95) 2024-12-07T01:24:20,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741845_1021 (size=95) 2024-12-07T01:24:20,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741845_1021 (size=95) 2024-12-07T01:24:20,456 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:20,456 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-79887031:(num 1733534660402) 2024-12-07T01:24:20,457 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T01:24:20,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741847_1023 (size=320) 2024-12-07T01:24:20,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741847_1023 (size=320) 2024-12-07T01:24:20,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741847_1023 (size=320) 2024-12-07T01:24:20,477 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T01:24:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741848_1024 (size=253) 2024-12-07T01:24:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741848_1024 (size=253) 2024-12-07T01:24:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741848_1024 (size=253) 2024-12-07T01:24:20,507 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2, size=253 (253bytes) 2024-12-07T01:24:20,507 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2 2024-12-07T01:24:20,508 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2 after 1ms 2024-12-07T01:24:20,512 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,512 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2 took 5ms 
2024-12-07T01:24:20,515 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2 so closing down 2024-12-07T01:24:20,515 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:20,517 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-07T01:24:20,519 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp 2024-12-07T01:24:20,519 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:20,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741849_1025 (size=253) 2024-12-07T01:24:20,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741849_1025 (size=253) 2024-12-07T01:24:20,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741849_1025 (size=253) 2024-12-07T01:24:20,528 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:20,530 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 2024-12-07T01:24:20,530 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-07T01:24:20,530 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2, journal: Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2, size=253 (253bytes) at 1733534660507Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2 so closing down at 1733534660515 (+8 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp at 1733534660519 (+4 ms)3 split writer threads finished at 1733534660519Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733534660528 (+9 ms)Rename recovered edits 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 at 1733534660530 (+2 ms)Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733534660530 2024-12-07T01:24:20,543 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1, size=320 (320bytes) 2024-12-07T01:24:20,544 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1 2024-12-07T01:24:20,544 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1 after 0ms 2024-12-07T01:24:20,548 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,549 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1 took 6ms 2024-12-07T01:24:20,552 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1 so closing down 2024-12-07T01:24:20,552 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:20,554 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-07T01:24:20,556 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp 2024-12-07T01:24:20,556 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:20,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741850_1026 (size=320) 2024-12-07T01:24:20,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741850_1026 (size=320) 2024-12-07T01:24:20,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741850_1026 (size=320) 2024-12-07T01:24:20,564 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:20,568 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,570 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002, length=253 2024-12-07T01:24:20,573 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 2024-12-07T01:24:20,573 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-07T01:24:20,573 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1, journal: Splitting hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1, size=320 (320bytes) at 1733534660543Finishing writing output for hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1 so closing down at 1733534660552 (+9 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp at 1733534660556 (+4 ms)3 split writer threads finished at 1733534660556Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733534660564 (+8 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 at 1733534660573 (+9 ms)Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733534660573 2024-12-07T01:24:20,573 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:20,576 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:20,589 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal.1733534660576, exclude list is [], retry=0 2024-12-07T01:24:20,592 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:20,593 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:20,594 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:20,595 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal.1733534660576 2024-12-07T01:24:20,596 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:20,596 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 8ba6006ffa8d353c6daecf47951f5611, NAME => 'testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:20,596 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:20,596 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,596 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,598 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,600 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8ba6006ffa8d353c6daecf47951f5611 columnFamilyName a 2024-12-07T01:24:20,600 DEBUG [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,601 INFO [StoreOpener-8ba6006ffa8d353c6daecf47951f5611-1 {}] regionserver.HStore(327): Store=8ba6006ffa8d353c6daecf47951f5611/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,601 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,601 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,604 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,604 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 2024-12-07T01:24:20,608 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:20,610 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 2024-12-07T01:24:20,610 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8ba6006ffa8d353c6daecf47951f5611 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-07T01:24:20,625 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/.tmp/a/42e4bc46d21247f081752d305afc2bd3 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733534660456/Put/seqid=0 2024-12-07T01:24:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741852_1028 (size=5170) 2024-12-07T01:24:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741852_1028 (size=5170) 2024-12-07T01:24:20,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741852_1028 (size=5170) 2024-12-07T01:24:20,635 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/.tmp/a/42e4bc46d21247f081752d305afc2bd3 2024-12-07T01:24:20,645 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/.tmp/a/42e4bc46d21247f081752d305afc2bd3 as 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/a/42e4bc46d21247f081752d305afc2bd3 2024-12-07T01:24:20,653 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/a/42e4bc46d21247f081752d305afc2bd3, entries=2, sequenceid=2, filesize=5.0 K 2024-12-07T01:24:20,653 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 8ba6006ffa8d353c6daecf47951f5611 in 43ms, sequenceid=2, compaction requested=false; wal=null 2024-12-07T01:24:20,654 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/0000000000000000002 2024-12-07T01:24:20,655 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,655 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,657 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8ba6006ffa8d353c6daecf47951f5611 2024-12-07T01:24:20,660 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/8ba6006ffa8d353c6daecf47951f5611/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-07T01:24:20,661 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8ba6006ffa8d353c6daecf47951f5611; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71446771, jitterRate=0.0646398514509201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:20,661 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8ba6006ffa8d353c6daecf47951f5611: Writing region info on filesystem at 1733534660596Initializing all the Stores at 1733534660598 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660598Obtaining lock to block concurrent updates at 1733534660610 (+12 ms)Preparing flush snapshotting stores in 8ba6006ffa8d353c6daecf47951f5611 at 1733534660610Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733534660610Flushing stores of testReplayEditsWrittenIntoWAL,,1733534660395.8ba6006ffa8d353c6daecf47951f5611. 
at 1733534660610Flushing 8ba6006ffa8d353c6daecf47951f5611/a: creating writer at 1733534660610Flushing 8ba6006ffa8d353c6daecf47951f5611/a: appending metadata at 1733534660624 (+14 ms)Flushing 8ba6006ffa8d353c6daecf47951f5611/a: closing flushed file at 1733534660624Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3052174c: reopening flushed file at 1733534660644 (+20 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 8ba6006ffa8d353c6daecf47951f5611 in 43ms, sequenceid=2, compaction requested=false; wal=null at 1733534660653 (+9 ms)Cleaning up temporary data from old regions at 1733534660655 (+2 ms)Region opened successfully at 1733534660661 (+6 ms) 2024-12-07T01:24:20,679 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=375 (was 365) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36636 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36500 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35118 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36574 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35350 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36694 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=761 (was 677) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=225 (was 225), ProcessCount=11 (was 11), AvailableMemoryMB=8146 (was 8158) 2024-12-07T01:24:20,689 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=375, OpenFileDescriptor=761, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=8145 2024-12-07T01:24:20,703 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:20,705 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:20,705 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:20,708 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-50290126, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-50290126, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:20,721 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-50290126/hregion-50290126.1733534660709, exclude list is [], retry=0 2024-12-07T01:24:20,725 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:20,725 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:20,725 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:20,728 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-50290126/hregion-50290126.1733534660709 2024-12-07T01:24:20,729 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:20,729 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 51e09c56fa39fbc8cc2ac1f0ea6a1dcc, NAME => 'testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:20,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741854_1030 (size=64) 2024-12-07T01:24:20,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741854_1030 (size=64) 2024-12-07T01:24:20,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741854_1030 (size=64) 2024-12-07T01:24:20,744 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:20,750 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,752 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName a 2024-12-07T01:24:20,752 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,753 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,753 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,755 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName b 2024-12-07T01:24:20,755 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,756 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,756 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,758 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName c 2024-12-07T01:24:20,758 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:20,759 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:20,759 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,760 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,761 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,762 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,762 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 
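The "Found 0 recovered edits file(s)" entries above come from the region-open path listing the region's recovered.edits directory and replaying any split output found there before the region accepts writes. The sketch below is a simplified illustration of that scan using only the Hadoop FileSystem API; it is not the HBase implementation, replayRecoveredEdit() is a hypothetical stand-in for the real replay logic, and the paths are taken from the log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified scan of a region's recovered.edits directory.
public class RecoveredEditsScan {

  // Hypothetical stand-in for the real replay logic.
  static void replayRecoveredEdit(Path editsFile) {
    // ... read WAL entries from editsFile and apply them to the region ...
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path regionDir = new Path("hdfs://localhost:42771/hbase/data/default/"
        + "testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc");
    Path editsDir = new Path(regionDir, "recovered.edits");
    FileSystem fs = regionDir.getFileSystem(conf);

    if (!fs.exists(editsDir)) {
      System.out.println("Found 0 recovered edits file(s)");
      return;
    }
    FileStatus[] files = fs.listStatus(editsDir);
    System.out.println("Found " + files.length + " recovered edits file(s)");
    for (FileStatus f : files) {
      String name = f.getPath().getName();
      // Skip writer temp files and the <n>.seqid marker written at open time.
      if (name.endsWith(".temp") || name.endsWith(".seqid")) {
        continue;
      }
      long maxSeqIdInFile = Long.parseLong(name); // file is named by its highest sequence id
      replayRecoveredEdit(f.getPath());
      System.out.println("Replayed edits up to sequence id " + maxSeqIdInFile);
    }
  }
}
```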
2024-12-07T01:24:20,763 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:20,764 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:20,767 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:20,768 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 51e09c56fa39fbc8cc2ac1f0ea6a1dcc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74101238, jitterRate=0.10419449210166931}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:20,768 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc: Writing region info on filesystem at 1733534660745Initializing all the Stores at 1733534660746 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660746Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660749 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534660749Cleaning up temporary data from old regions at 1733534660762 (+13 ms)Region opened successfully at 1733534660768 (+6 ms) 2024-12-07T01:24:20,768 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc, disabling compactions & flushes 2024-12-07T01:24:20,768 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:20,768 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:20,768 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. after waiting 0 ms 2024-12-07T01:24:20,768 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 
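The FlushLargeStoresPolicy line at the start of the entry above derives its per-family lower bound by dividing the region's memstore flush size by the number of column families. Assuming the default hbase.hregion.memstore.flush.size of 128 MB (an assumption; the test configuration is not shown in this log) and the three families a, b and c created here, the 42.7 M and flushSizeLowerBound=44739242 figures fall out directly:

```java
// Quick arithmetic check of the FlushLargeStoresPolicy numbers logged above.
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes (assumed default)
    int columnFamilies = 3;                      // families a, b and c
    long lowerBound = memstoreFlushSize / columnFamilies;
    // Prints 44739242, matching FlushLargeStoresPolicy{flushSizeLowerBound=44739242},
    // i.e. roughly 42.7 M per family as reported in the log.
    System.out.println(lowerBound);
  }
}
```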
2024-12-07T01:24:20,769 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:20,769 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc: Waiting for close lock at 1733534660768Disabling compacts and flushes for region at 1733534660768Disabling writes for close at 1733534660768Writing region close event to WAL at 1733534660769 (+1 ms)Closed at 1733534660769 2024-12-07T01:24:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741853_1029 (size=95) 2024-12-07T01:24:20,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741853_1029 (size=95) 2024-12-07T01:24:20,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741853_1029 (size=95) 2024-12-07T01:24:20,776 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:20,776 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-50290126:(num 1733534660709) 2024-12-07T01:24:20,776 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:20,778 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:20,793 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, exclude list is [], retry=0 2024-12-07T01:24:20,796 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:20,797 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:20,797 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:20,799 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 2024-12-07T01:24:20,800 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:21,016 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, size=0 (0bytes) 2024-12-07T01:24:21,016 WARN [Time-limited test {}] wal.WALSplitter(453): File 
hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 might be still open, length is 0 2024-12-07T01:24:21,016 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 2024-12-07T01:24:21,018 WARN [IPC Server handler 0 on default port 42771 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-07T01:24:21,018 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 after 2ms 2024-12-07T01:24:22,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741829_1005 (size=34) 2024-12-07T01:24:22,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741828_1004 (size=1189) 2024-12-07T01:24:22,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35382 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:38509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35382 dst: /127.0.0.1:38509 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38509 remote=/127.0.0.1:35382]. Total timeout mills is 60000, 58719 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:22,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36734 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36734 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:22,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:36608 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:40681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36608 dst: /127.0.0.1:40681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
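The "Failed to recover lease, attempt=0 ... after 2ms" entry above, and the "Recovered lease, attempt=1 ... after 4004ms" entry that follows, reflect the usual pattern before splitting a WAL that may still be open: keep asking the NameNode to recover the lease until the file is closed and safe to read. Below is a minimal sketch using DistributedFileSystem.recoverLease; the pause between attempts and the attempt limit are arbitrary illustration values, not the ones HBase actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified lease-recovery loop before splitting a possibly still-open WAL.
public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("hdfs://localhost:42771/hbase/WALs/"
        + "testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779");
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS files
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < 15; attempt++) {
      // recoverLease() returns true once the NameNode has closed the file.
      boolean recovered = dfs.recoverLease(wal);
      long elapsed = System.currentTimeMillis() - start;
      if (recovered) {
        System.out.println("Recovered lease, attempt=" + attempt + " after " + elapsed + "ms");
        return;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " after " + elapsed + "ms");
      Thread.sleep(4000); // give block recovery time to finish before retrying
    }
  }
}
```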
2024-12-07T01:24:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741855_1032 (size=263633) 2024-12-07T01:24:22,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741855_1032 (size=263633) 2024-12-07T01:24:22,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741855_1032 (size=263633) 2024-12-07T01:24:24,664 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T01:24:24,710 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T01:24:24,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T01:24:24,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:24,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T01:24:24,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T01:24:24,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T01:24:24,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:24,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-07T01:24:24,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:25,020 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 after 4004ms 2024-12-07T01:24:25,030 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:25,032 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 took 4016ms 2024-12-07T01:24:25,038 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733534660779.temp 
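The split writer above first creates 0000000000000000001-wal.1733534660779.temp and only publishes it once writing finishes, renaming it to a file named after the highest sequence id it contains (0000000000000003002 further down); the earlier wal-1/wal-2 split in this section also showed a stale file from a previous attempt being deleted before that rename. A minimal sketch of just that file handling follows, with the actual edit writing elided and the paths and sequence ids taken from the log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified "write to .temp, rename on close" handling of recovered-edits output.
public class RecoveredEditsCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path editsDir = new Path("hdfs://localhost:42771/hbase/data/default/"
        + "testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits");
    FileSystem fs = editsDir.getFileSystem(conf);

    long firstSeqId = 1L;   // first sequence id written to this output file
    long maxSeqId = 3002L;  // highest sequence id it ends up containing

    // 1. All edits go into a uniquely named temp file first.
    Path temp = new Path(editsDir,
        String.format("%019d", firstSeqId) + "-wal.1733534660779.temp");
    // ... append the split WAL entries to `temp` here ...

    // 2. The published name is the highest sequence id in the file
    //    (19 digits, zero padded, as in the log: 0000000000000003002).
    Path finalPath = new Path(editsDir, String.format("%019d", maxSeqId));

    // 3. A leftover file from a previous failed split attempt (or duplicated
    //    WAL entries) is deleted before the rename, as in the earlier WARN.
    if (fs.exists(finalPath)) {
      fs.delete(finalPath, false);
    }

    // 4. Publish the finished file under its final name.
    fs.rename(temp, finalPath);
  }
}
```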
2024-12-07T01:24:25,040 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp 2024-12-07T01:24:25,146 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779; continuing. 2024-12-07T01:24:25,146 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 so closing down 2024-12-07T01:24:25,146 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:25,146 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:25,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741856_1033 (size=263641) 2024-12-07T01:24:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741856_1033 (size=263641) 2024-12-07T01:24:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741856_1033 (size=263641) 2024-12-07T01:24:25,150 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp (wrote 3002 edits, skipped 0 edits in 67 ms) 2024-12-07T01:24:25,152 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 2024-12-07T01:24:25,152 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 119 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T01:24:25,152 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, journal: Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, size=0 (0bytes) at 1733534661016Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp at 1733534665040 (+4024 ms)Split 1024 edits, skipped 0 edits. at 1733534665096 (+56 ms)Split 2048 edits, skipped 0 edits. 
at 1733534665120 (+24 ms)Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 so closing down at 1733534665146 (+26 ms)3 split writer threads finished at 1733534665146Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp (wrote 3002 edits, skipped 0 edits in 67 ms) at 1733534665150 (+4 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000000001-wal.1733534660779.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 at 1733534665152 (+2 ms)Processed 3002 edits across 1 Regions in 119 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779, size=0, length=0, corrupted=false, cancelled=false at 1733534665152 2024-12-07T01:24:25,154 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534660779 2024-12-07T01:24:25,156 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 2024-12-07T01:24:25,156 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:25,159 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:25,172 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534665159, exclude list is [], retry=0 2024-12-07T01:24:25,175 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:25,176 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:25,176 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:25,179 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534665159 2024-12-07T01:24:25,179 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487)] 
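The split above writes each region's edits to a temporary recovered-edits file named after the first sequence id plus the source WAL, then renames it to the zero-padded highest sequence id it contains (0000000000000000001-wal.1733534660779.temp becoming 0000000000000003002). Below is a hypothetical helper that only sketches that naming pattern; it is not HBase's actual WALSplitUtil.

// Hypothetical helper mirroring the recovered.edits naming visible in the log;
// not the org.apache.hadoop.hbase.wal.WALSplitUtil implementation.
public final class RecoveredEditsNames {
  // Temp file: zero-padded first sequence id + "-" + source WAL name + ".temp"
  static String tempName(long firstSeqId, String walName) {
    return String.format("%019d-%s.temp", firstSeqId, walName);
  }

  // Final file: zero-padded highest sequence id written into that file.
  static String finalName(long maxSeqId) {
    return String.format("%019d", maxSeqId);
  }

  public static void main(String[] args) {
    System.out.println(tempName(1L, "wal.1733534660779")); // 0000000000000000001-wal.1733534660779.temp
    System.out.println(finalName(3002L));                  // 0000000000000003002
  }
}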
2024-12-07T01:24:25,180 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:25,182 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,184 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName a 2024-12-07T01:24:25,184 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:25,184 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:25,185 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,186 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName b 2024-12-07T01:24:25,186 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:25,186 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:25,187 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,188 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51e09c56fa39fbc8cc2ac1f0ea6a1dcc columnFamilyName c 2024-12-07T01:24:25,188 DEBUG [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:25,188 INFO [StoreOpener-51e09c56fa39fbc8cc2ac1f0ea6a1dcc-1 {}] regionserver.HStore(327): Store=51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:25,188 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,189 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,192 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,192 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 2024-12-07T01:24:25,196 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:25,237 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T01:24:25,564 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T01:24:25,604 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/ee2ad75649274f5399cd8768008824ce 
is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733534660807/Put/seqid=0 2024-12-07T01:24:25,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741858_1035 (size=50463) 2024-12-07T01:24:25,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741858_1035 (size=50463) 2024-12-07T01:24:25,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741858_1035 (size=50463) 2024-12-07T01:24:25,613 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/ee2ad75649274f5399cd8768008824ce 2024-12-07T01:24:25,620 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/ee2ad75649274f5399cd8768008824ce as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a/ee2ad75649274f5399cd8768008824ce 2024-12-07T01:24:25,625 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a/ee2ad75649274f5399cd8768008824ce, entries=754, sequenceid=754, filesize=49.3 K 2024-12-07T01:24:25,626 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc in 63ms, sequenceid=754, compaction requested=false; wal=null 2024-12-07T01:24:25,655 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T01:24:25,656 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T01:24:25,663 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/aea7bcc0fe4d457c94e51a7184f5b6aa is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733534660860/Put/seqid=0 2024-12-07T01:24:25,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741859_1036 (size=20072) 2024-12-07T01:24:25,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741859_1036 (size=20072) 2024-12-07T01:24:25,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741859_1036 (size=20072) 2024-12-07T01:24:25,676 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/aea7bcc0fe4d457c94e51a7184f5b6aa 2024-12-07T01:24:25,710 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/5e44aee187d145778f33765322b0840a is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733534660887/Put/seqid=0 2024-12-07T01:24:25,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741860_1037 (size=35835) 2024-12-07T01:24:25,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741860_1037 (size=35835) 2024-12-07T01:24:25,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741860_1037 (size=35835) 2024-12-07T01:24:25,719 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/5e44aee187d145778f33765322b0840a 2024-12-07T01:24:25,728 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/a/aea7bcc0fe4d457c94e51a7184f5b6aa as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a/aea7bcc0fe4d457c94e51a7184f5b6aa 2024-12-07T01:24:25,740 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/a/aea7bcc0fe4d457c94e51a7184f5b6aa, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-07T01:24:25,742 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/5e44aee187d145778f33765322b0840a as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b/5e44aee187d145778f33765322b0840a 2024-12-07T01:24:25,749 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b/5e44aee187d145778f33765322b0840a, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-07T01:24:25,750 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc in 93ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-07T01:24:25,770 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T01:24:25,770 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T01:24:25,777 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/aa00fd74a2364c76a34d097715caee03 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733534660915/Put/seqid=0 2024-12-07T01:24:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741861_1038 (size=35082) 2024-12-07T01:24:25,787 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741861_1038 (size=35082) 2024-12-07T01:24:25,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741861_1038 (size=35082) 2024-12-07T01:24:25,789 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/aa00fd74a2364c76a34d097715caee03 2024-12-07T01:24:25,814 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/222d3480210d437a8f1e369e5ecfd483 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733534660947/Put/seqid=0 2024-12-07T01:24:25,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741862_1039 (size=20825) 2024-12-07T01:24:25,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741862_1039 (size=20825) 2024-12-07T01:24:25,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741862_1039 (size=20825) 2024-12-07T01:24:25,823 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/222d3480210d437a8f1e369e5ecfd483 2024-12-07T01:24:25,832 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/b/aa00fd74a2364c76a34d097715caee03 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b/aa00fd74a2364c76a34d097715caee03 2024-12-07T01:24:25,840 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/b/aa00fd74a2364c76a34d097715caee03, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-07T01:24:25,843 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/222d3480210d437a8f1e369e5ecfd483 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c/222d3480210d437a8f1e369e5ecfd483 2024-12-07T01:24:25,850 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c/222d3480210d437a8f1e369e5ecfd483, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-07T01:24:25,850 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc in 80ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-07T01:24:25,861 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another 
family:testReplayEditsWrittenIntoWAL/1733534660981/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:25,865 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 2024-12-07T01:24:25,865 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T01:24:25,866 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-07T01:24:25,875 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/0ac32cb993a74e21ab40e0ac4735bd7f is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733534660953/Put/seqid=0 2024-12-07T01:24:25,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741863_1040 (size=50301) 2024-12-07T01:24:25,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741863_1040 (size=50301) 2024-12-07T01:24:25,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741863_1040 (size=50301) 2024-12-07T01:24:25,886 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/0ac32cb993a74e21ab40e0ac4735bd7f 2024-12-07T01:24:25,895 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0ac32cb993a74e21ab40e0ac4735bd7f 2024-12-07T01:24:25,897 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/.tmp/c/0ac32cb993a74e21ab40e0ac4735bd7f as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c/0ac32cb993a74e21ab40e0ac4735bd7f 2024-12-07T01:24:25,904 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0ac32cb993a74e21ab40e0ac4735bd7f 2024-12-07T01:24:25,904 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/c/0ac32cb993a74e21ab40e0ac4735bd7f, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-07T01:24:25,904 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc in 38ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-07T01:24:25,905 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/0000000000000003002 2024-12-07T01:24:25,907 DEBUG [Time-limited test {}] 
regionserver.HRegion(1048): stopping wal replay for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,907 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,908 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T01:24:25,910 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc 2024-12-07T01:24:25,914 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenIntoWAL/51e09c56fa39fbc8cc2ac1f0ea6a1dcc/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-07T01:24:25,921 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 51e09c56fa39fbc8cc2ac1f0ea6a1dcc; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69778023, jitterRate=0.039773568511009216}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T01:24:25,921 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc: Writing region info on filesystem at 1733534665180Initializing all the Stores at 1733534665181 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534665181Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534665182 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534665182Cleaning up temporary data from old regions at 1733534665907 (+725 ms)Region opened successfully at 1733534665921 (+14 ms) 2024-12-07T01:24:25,988 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 51e09c56fa39fbc8cc2ac1f0ea6a1dcc, disabling compactions & flushes 2024-12-07T01:24:25,988 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:25,988 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:25,988 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 
after waiting 0 ms 2024-12-07T01:24:25,988 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:25,991 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733534660703.51e09c56fa39fbc8cc2ac1f0ea6a1dcc. 2024-12-07T01:24:25,991 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 51e09c56fa39fbc8cc2ac1f0ea6a1dcc: Waiting for close lock at 1733534665988Disabling compacts and flushes for region at 1733534665988Disabling writes for close at 1733534665988Writing region close event to WAL at 1733534665990 (+2 ms)Closed at 1733534665990 2024-12-07T01:24:25,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741857_1034 (size=95) 2024-12-07T01:24:25,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741857_1034 (size=95) 2024-12-07T01:24:25,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741857_1034 (size=95) 2024-12-07T01:24:25,999 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:25,999 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733534665159) 2024-12-07T01:24:26,012 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=391 (was 375) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@7942e1ab[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-763034285_22 at /127.0.0.1:44328 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:35357 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-763034285_22 at /127.0.0.1:44308 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@3bcc3595[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-763034285_22 at /127.0.0.1:46592 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42771 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:36533 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35357 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-763034285_22 at /127.0.0.1:37080 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:42771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=841 (was 761) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 225), ProcessCount=11 (was 11), AvailableMemoryMB=8080 (was 8145) 2024-12-07T01:24:26,023 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=391, OpenFileDescriptor=841, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=8079 2024-12-07T01:24:26,037 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:26,039 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:26,040 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:26,043 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-45451287, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-45451287, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:26,055 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-45451287/hregion-45451287.1733534666043, exclude list is [], retry=0 2024-12-07T01:24:26,059 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:26,059 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:26,059 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:26,062 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-45451287/hregion-45451287.1733534666043 2024-12-07T01:24:26,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:26,063 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 7e17939fa8b2a796fc159cc6e5174f27, NAME => 'test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:26,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741865_1042 (size=43) 2024-12-07T01:24:26,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741865_1042 (size=43) 2024-12-07T01:24:26,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741865_1042 (size=43) 2024-12-07T01:24:26,075 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:26,076 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,081 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName a 2024-12-07T01:24:26,081 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,082 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,082 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,084 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName b 2024-12-07T01:24:26,084 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,085 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,085 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,087 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName c 2024-12-07T01:24:26,087 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,087 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,088 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,088 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,089 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,090 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,090 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,090 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:26,092 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,094 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:26,095 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7e17939fa8b2a796fc159cc6e5174f27; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69626129, jitterRate=0.03751017153263092}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:26,096 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7e17939fa8b2a796fc159cc6e5174f27: Writing region info on filesystem at 1733534666075Initializing all the Stores at 1733534666076 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666076Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666076Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666076Cleaning up temporary data from old regions at 1733534666090 (+14 ms)Region opened successfully at 1733534666096 (+6 ms) 2024-12-07T01:24:26,096 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7e17939fa8b2a796fc159cc6e5174f27, disabling compactions & flushes 2024-12-07T01:24:26,096 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,096 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,096 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. after waiting 0 ms 2024-12-07T01:24:26,096 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,097 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 
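As with the earlier region, FlushLargeStoresPolicy falls back to region.getMemStoreFlushHeapSize divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A minimal arithmetic sketch of that fallback, assuming the default 128 MiB region memstore flush size for test2727 (consistent with the printed flushSizeLowerBound=44739242):

// Sketch of the fallback described in the log: lower bound = region memstore
// flush size / number of families when the per-CF property is unset.
// 134217728 / 3 = 44739242, matching FlushLargeStoresPolicy{flushSizeLowerBound=44739242}.
public final class FlushLowerBoundSketch {
  static long lowerBound(long memstoreFlushSizeBytes, int numFamilies) {
    return memstoreFlushSizeBytes / numFamilies;
  }

  public static void main(String[] args) {
    long bound = lowerBound(128L * 1024 * 1024, 3); // three families: a, b, c
    System.out.printf("per-family flush lower bound = %d bytes (~%.1f MB)%n",
        bound, bound / (1024.0 * 1024.0)); // 44739242 bytes (~42.7 MB)
  }
}

The earlier testReplayEditsWrittenIntoWAL region logs the same fallback with a 16.0 M result (flushSizeLowerBound=16777216), implying its configured flush size divided by its three families.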
2024-12-07T01:24:26,097 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7e17939fa8b2a796fc159cc6e5174f27: Waiting for close lock at 1733534666096Disabling compacts and flushes for region at 1733534666096Disabling writes for close at 1733534666096Writing region close event to WAL at 1733534666097 (+1 ms)Closed at 1733534666097 2024-12-07T01:24:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741864_1041 (size=95) 2024-12-07T01:24:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741864_1041 (size=95) 2024-12-07T01:24:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741864_1041 (size=95) 2024-12-07T01:24:26,103 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:26,104 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-45451287:(num 1733534666043) 2024-12-07T01:24:26,104 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:26,108 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:26,120 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, exclude list is [], retry=0 2024-12-07T01:24:26,123 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:26,124 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:26,124 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:26,126 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 2024-12-07T01:24:26,126 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741866_1043 (size=263359) 2024-12-07T01:24:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741866_1043 (size=263359) 2024-12-07T01:24:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741866_1043 
(size=263359) 2024-12-07T01:24:26,284 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, size=257.2 K (263359bytes) 2024-12-07T01:24:26,284 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 2024-12-07T01:24:26,285 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 after 1ms 2024-12-07T01:24:26,287 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:26,289 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 took 5ms 2024-12-07T01:24:26,293 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733534666108.temp 2024-12-07T01:24:26,295 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp 2024-12-07T01:24:26,345 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 so closing down 2024-12-07T01:24:26,345 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:26,346 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:26,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741867_1044 (size=263359) 2024-12-07T01:24:26,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741867_1044 (size=263359) 2024-12-07T01:24:26,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741867_1044 (size=263359) 2024-12-07T01:24:26,352 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp (wrote 3000 edits, skipped 0 edits in 42 ms) 2024-12-07T01:24:26,353 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp to hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 2024-12-07T01:24:26,353 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 63 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, size=257.2 K, length=263359, corrupted=false, cancelled=false 
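[Editor's note, not part of the log] The reader initialization above reports hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ, which is the feature this test class (TestAsyncWALReplayValueCompression) exercises. As a hedged sketch only: WAL key/tag/value compression is toggled through configuration properties along the lines of the ones below. The property names are quoted from memory for HBase 2.5+, not from this log, and should be verified against the release in use.

// Hedged sketch: enabling WAL compression (including value compression, GZ) via Configuration.
// The property keys below are assumptions, not taken from this log; verify them for your HBase version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalValueCompressionConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.wal.enablecompression", true);         // WAL key compression
    conf.setBoolean("hbase.regionserver.wal.tags.enablecompression", true);    // tag compression
    conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true); // value compression
    conf.set("hbase.regionserver.wal.value.compression.type", "GZ");           // matches valueCompressionType=GZ above
    return conf;
  }
}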
2024-12-07T01:24:26,353 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, journal: Splitting hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, size=257.2 K (263359bytes) at 1733534666284Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp at 1733534666295 (+11 ms)Split 1024 edits, skipped 0 edits. at 1733534666312 (+17 ms)Split 2048 edits, skipped 0 edits. at 1733534666329 (+17 ms)Finishing writing output for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 so closing down at 1733534666345 (+16 ms)3 split writer threads finished at 1733534666346 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp (wrote 3000 edits, skipped 0 edits in 42 ms) at 1733534666352 (+6 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000000001-wal.1733534666108.temp to hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 at 1733534666353 (+1 ms)Processed 3000 edits across 1 Regions in 63 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1733534666353 2024-12-07T01:24:26,355 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666108 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534666108 2024-12-07T01:24:26,356 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 2024-12-07T01:24:26,356 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:26,359 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:26,372 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, exclude list is [], retry=0 2024-12-07T01:24:26,376 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:26,377 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:26,378 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:26,380 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 2024-12-07T01:24:26,380 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:26,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741868_1045 (size=263486) 2024-12-07T01:24:26,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741868_1045 (size=263486) 2024-12-07T01:24:26,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741868_1045 (size=263486) 2024-12-07T01:24:26,525 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, size=257.3 K (263486bytes) 2024-12-07T01:24:26,525 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 2024-12-07T01:24:26,525 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 after 0ms 2024-12-07T01:24:26,528 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:26,530 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 took 6ms 2024-12-07T01:24:26,534 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733534666359.temp 2024-12-07T01:24:26,536 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp 2024-12-07T01:24:26,581 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 so closing down 2024-12-07T01:24:26,581 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:26,582 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:26,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741869_1046 (size=263486) 2024-12-07T01:24:26,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741869_1046 (size=263486) 2024-12-07T01:24:26,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741869_1046 (size=263486) 2024-12-07T01:24:26,585 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp (wrote 3000 edits, skipped 0 edits in 36 ms) 2024-12-07T01:24:26,587 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp to hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 2024-12-07T01:24:26,587 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 57 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-12-07T01:24:26,587 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, journal: Splitting hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, size=257.3 K (263486bytes) at 1733534666525Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp at 1733534666536 (+11 ms)Split 1024 edits, skipped 0 edits. at 1733534666547 (+11 ms)Split 2048 edits, skipped 0 edits. at 1733534666564 (+17 ms)Finishing writing output for hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 so closing down at 1733534666581 (+17 ms)3 split writer threads finished at 1733534666582 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp (wrote 3000 edits, skipped 0 edits in 36 ms) at 1733534666585 (+3 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003001-wal.1733534666359.temp to hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 at 1733534666587 (+2 ms)Processed 3000 edits across 1 Regions in 57 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1733534666587 2024-12-07T01:24:26,590 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666359 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534666359 2024-12-07T01:24:26,592 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 2024-12-07T01:24:26,592 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:26,595 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/test2727-manual,16010,1733534666036, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:26,608 DEBUG [Time-limited test {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666595, exclude list is [], retry=0 2024-12-07T01:24:26,612 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:26,612 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:26,613 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:26,616 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733534666036/wal.1733534666595 2024-12-07T01:24:26,617 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:26,617 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7e17939fa8b2a796fc159cc6e5174f27, NAME => 'test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:26,618 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:26,618 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,618 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,620 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,621 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName a 2024-12-07T01:24:26,621 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,622 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,622 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,623 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName b 2024-12-07T01:24:26,623 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,624 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,624 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,625 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e17939fa8b2a796fc159cc6e5174f27 columnFamilyName c 2024-12-07T01:24:26,626 DEBUG [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:26,626 INFO [StoreOpener-7e17939fa8b2a796fc159cc6e5174f27-1 {}] 
regionserver.HStore(327): Store=7e17939fa8b2a796fc159cc6e5174f27/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:26,627 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,628 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,630 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,631 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 2024-12-07T01:24:26,635 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:26,679 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 2024-12-07T01:24:26,681 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 2024-12-07T01:24:26,685 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:26,729 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 2024-12-07T01:24:26,729 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7e17939fa8b2a796fc159cc6e5174f27 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-07T01:24:26,780 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/a/1a1603add07a4f7da5f9ece1ca5c65ce is 41, key is test2727/a:100/1733534666384/Put/seqid=0 2024-12-07T01:24:26,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741871_1048 (size=84227) 2024-12-07T01:24:26,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741871_1048 (size=84227) 2024-12-07T01:24:26,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741871_1048 (size=84227) 2024-12-07T01:24:26,800 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/a/1a1603add07a4f7da5f9ece1ca5c65ce 2024-12-07T01:24:26,832 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/b/4bd33c001ebb4cc89562c7fb72108685 is 41, key is test2727/b:100/1733534666425/Put/seqid=0 2024-12-07T01:24:26,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741872_1049 (size=84609) 2024-12-07T01:24:26,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741872_1049 (size=84609) 2024-12-07T01:24:26,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741872_1049 (size=84609) 2024-12-07T01:24:26,838 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/b/4bd33c001ebb4cc89562c7fb72108685 2024-12-07T01:24:26,863 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/c/ae1f7890fcaf40b58cc91711ec99b92c is 41, key is test2727/c:100/1733534666466/Put/seqid=0 2024-12-07T01:24:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741873_1050 (size=84609) 2024-12-07T01:24:26,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741873_1050 (size=84609) 2024-12-07T01:24:26,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741873_1050 (size=84609) 2024-12-07T01:24:26,871 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/c/ae1f7890fcaf40b58cc91711ec99b92c 2024-12-07T01:24:26,878 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/a/1a1603add07a4f7da5f9ece1ca5c65ce as hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/a/1a1603add07a4f7da5f9ece1ca5c65ce 2024-12-07T01:24:26,888 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/a/1a1603add07a4f7da5f9ece1ca5c65ce, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-07T01:24:26,890 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/b/4bd33c001ebb4cc89562c7fb72108685 as hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/b/4bd33c001ebb4cc89562c7fb72108685 2024-12-07T01:24:26,897 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/b/4bd33c001ebb4cc89562c7fb72108685, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-07T01:24:26,899 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/.tmp/c/ae1f7890fcaf40b58cc91711ec99b92c as hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/c/ae1f7890fcaf40b58cc91711ec99b92c 2024-12-07T01:24:26,908 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/c/ae1f7890fcaf40b58cc91711ec99b92c, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-07T01:24:26,909 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 7e17939fa8b2a796fc159cc6e5174f27 in 179ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-07T01:24:26,910 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000003000 2024-12-07T01:24:26,911 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/0000000000000006000 2024-12-07T01:24:26,914 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,915 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,916 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
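[Editor's note, not part of the log] Twice in this run FlushLargeStoresPolicy falls back to region.getMemStoreFlushHeapSize divided by the number of families (42.7 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the test2727 table descriptor. A minimal sketch of supplying that lower bound explicitly in the table descriptor is shown below; the property name is taken verbatim from the log line, while the 16 MB value is an arbitrary illustration, not something used by this test.

// Illustrative sketch: set the per-column-family flush lower bound that the log reports as missing.
// Column families would still need to be added as in the descriptor dump earlier in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundExample {
  static TableDescriptorBuilder withLowerBound() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("test2727"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024)); // arbitrary 16 MB example value
  }
}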
2024-12-07T01:24:26,917 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7e17939fa8b2a796fc159cc6e5174f27 2024-12-07T01:24:26,920 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/test2727/7e17939fa8b2a796fc159cc6e5174f27/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-07T01:24:26,921 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7e17939fa8b2a796fc159cc6e5174f27; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65628676, jitterRate=-0.022056519985198975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:26,922 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7e17939fa8b2a796fc159cc6e5174f27: Writing region info on filesystem at 1733534666618Initializing all the Stores at 1733534666620 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666620Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666620Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534666620Obtaining lock to block concurrent updates at 1733534666729 (+109 ms)Preparing flush snapshotting stores in 7e17939fa8b2a796fc159cc6e5174f27 at 1733534666729Finished memstore snapshotting test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733534666729Flushing stores of test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 
at 1733534666729Flushing 7e17939fa8b2a796fc159cc6e5174f27/a: creating writer at 1733534666730 (+1 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/a: appending metadata at 1733534666779 (+49 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/a: closing flushed file at 1733534666779Flushing 7e17939fa8b2a796fc159cc6e5174f27/b: creating writer at 1733534666812 (+33 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/b: appending metadata at 1733534666830 (+18 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/b: closing flushed file at 1733534666830Flushing 7e17939fa8b2a796fc159cc6e5174f27/c: creating writer at 1733534666844 (+14 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/c: appending metadata at 1733534666862 (+18 ms)Flushing 7e17939fa8b2a796fc159cc6e5174f27/c: closing flushed file at 1733534666862Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f23e78f: reopening flushed file at 1733534666877 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@412452a5: reopening flushed file at 1733534666888 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50f41a9b: reopening flushed file at 1733534666898 (+10 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 7e17939fa8b2a796fc159cc6e5174f27 in 179ms, sequenceid=6000, compaction requested=false; wal=null at 1733534666909 (+11 ms)Cleaning up temporary data from old regions at 1733534666915 (+6 ms)Region opened successfully at 1733534666922 (+7 ms) 2024-12-07T01:24:26,923 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-07T01:24:26,923 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7e17939fa8b2a796fc159cc6e5174f27, disabling compactions & flushes 2024-12-07T01:24:26,923 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,923 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,923 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. after waiting 0 ms 2024-12-07T01:24:26,923 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 2024-12-07T01:24:26,924 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733534666038.7e17939fa8b2a796fc159cc6e5174f27. 
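[Editor's note, not part of the log] At this point the region has been reopened at sequenceid 6001 after replaying the two recovered.edits files (3000 edits each) and flushing ~215 KB across families a, b and c. As a hedged illustration of how replayed data like this could be checked from the client side — this is an assumed verification snippet, not the logged test's own assertion code — a plain scan can count the cells in test2727:

// Illustrative sketch: scan 'test2727' and count returned cells after WAL replay.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class CountTest2727Cells {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("test2727"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      long cells = 0;
      for (Result result : scanner) {
        cells += result.rawCells().length;    // each replayed edit is one cell
      }
      System.out.println("cells = " + cells); // 6000 would match the replay counts logged above
    }
  }
}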
2024-12-07T01:24:26,924 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7e17939fa8b2a796fc159cc6e5174f27: Waiting for close lock at 1733534666923Disabling compacts and flushes for region at 1733534666923Disabling writes for close at 1733534666923Writing region close event to WAL at 1733534666924 (+1 ms)Closed at 1733534666924 2024-12-07T01:24:26,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741870_1047 (size=95) 2024-12-07T01:24:26,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741870_1047 (size=95) 2024-12-07T01:24:26,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741870_1047 (size=95) 2024-12-07T01:24:26,933 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:26,933 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733534666595) 2024-12-07T01:24:26,947 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=393 (was 391) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:46646 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44328 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:37214 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=903 (was 841) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 207), ProcessCount=11 (was 11), AvailableMemoryMB=7828 (was 8079) 2024-12-07T01:24:26,961 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=393, OpenFileDescriptor=903, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=7828 2024-12-07T01:24:26,979 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:26,987 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:26,987 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733534666987 2024-12-07T01:24:26,997 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 2024-12-07T01:24:26,999 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:27,000 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 9e7d4da76684e37fdd9e047710af6618, NAME => 'testSequentialEditLogSeqNum,,1733534666980.9e7d4da76684e37fdd9e047710af6618.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:27,001 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733534666980.9e7d4da76684e37fdd9e047710af6618.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:27,001 
DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,001 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,002 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618 doesn't exist for region: 9e7d4da76684e37fdd9e047710af6618 on table testSequentialEditLogSeqNum 2024-12-07T01:24:27,003 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 9e7d4da76684e37fdd9e047710af6618 on table testSequentialEditLogSeqNum 2024-12-07T01:24:27,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741875_1052 (size=62) 2024-12-07T01:24:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741875_1052 (size=62) 2024-12-07T01:24:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741875_1052 (size=62) 2024-12-07T01:24:27,016 INFO [StoreOpener-9e7d4da76684e37fdd9e047710af6618-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,018 INFO [StoreOpener-9e7d4da76684e37fdd9e047710af6618-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e7d4da76684e37fdd9e047710af6618 columnFamilyName a 2024-12-07T01:24:27,018 DEBUG [StoreOpener-9e7d4da76684e37fdd9e047710af6618-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,019 INFO [StoreOpener-9e7d4da76684e37fdd9e047710af6618-1 {}] regionserver.HStore(327): Store=9e7d4da76684e37fdd9e047710af6618/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,019 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,021 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,021 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,022 
DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,022 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,024 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 9e7d4da76684e37fdd9e047710af6618 2024-12-07T01:24:27,026 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:27,026 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 9e7d4da76684e37fdd9e047710af6618; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61193657, jitterRate=-0.08814345300197601}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:27,027 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 9e7d4da76684e37fdd9e047710af6618: Writing region info on filesystem at 1733534667001Initializing all the Stores at 1733534667015 (+14 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667015Cleaning up temporary data from old regions at 1733534667022 (+7 ms)Region opened successfully at 1733534667027 (+5 ms) 2024-12-07T01:24:27,040 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9e7d4da76684e37fdd9e047710af6618 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-07T01:24:27,061 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/.tmp/a/fee996ef23d645bf848630471691bdd0 is 81, key is testSequentialEditLogSeqNum/a:x0/1733534667027/Put/seqid=0 2024-12-07T01:24:27,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741876_1053 (size=5833) 2024-12-07T01:24:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741876_1053 (size=5833) 2024-12-07T01:24:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741876_1053 (size=5833) 2024-12-07T01:24:27,472 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/.tmp/a/fee996ef23d645bf848630471691bdd0 2024-12-07T01:24:27,481 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/.tmp/a/fee996ef23d645bf848630471691bdd0 as hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/a/fee996ef23d645bf848630471691bdd0 2024-12-07T01:24:27,493 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/a/fee996ef23d645bf848630471691bdd0, entries=10, sequenceid=13, filesize=5.7 K 2024-12-07T01:24:27,494 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 9e7d4da76684e37fdd9e047710af6618 in 454ms, sequenceid=13, compaction requested=false 2024-12-07T01:24:27,494 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9e7d4da76684e37fdd9e047710af6618: 2024-12-07T01:24:27,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T01:24:27,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T01:24:27,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T01:24:27,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T01:24:27,502 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T01:24:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741874_1051 (size=1844) 2024-12-07T01:24:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741874_1051 (size=1844) 2024-12-07T01:24:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741874_1051 (size=1844) 2024-12-07T01:24:27,519 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987, size=1.8 K (1844bytes) 2024-12-07T01:24:27,520 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 2024-12-07T01:24:27,520 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 after 0ms 2024-12-07T01:24:27,523 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:27,523 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 took 4ms 2024-12-07T01:24:27,526 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 so closing down 2024-12-07T01:24:27,526 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:27,527 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733534666987.temp 2024-12-07T01:24:27,529 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp 2024-12-07T01:24:27,529 INFO [Time-limited test {}] 
wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741877_1054 (size=1477) 2024-12-07T01:24:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741877_1054 (size=1477) 2024-12-07T01:24:27,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741877_1054 (size=1477) 2024-12-07T01:24:27,544 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:27,546 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp to hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000020 2024-12-07T01:24:27,546 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 23 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-12-07T01:24:27,546 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987, journal: Splitting hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987, size=1.8 K (1844bytes) at 1733534667519Finishing writing output for hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987 so closing down at 1733534667526 (+7 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp at 1733534667529 (+3 ms)3 split writer threads finished at 1733534667529Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733534667544 (+15 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000003-wal.1733534666987.temp to hdfs://localhost:42771/hbase/data/default/testSequentialEditLogSeqNum/9e7d4da76684e37fdd9e047710af6618/recovered.edits/0000000000000000020 at 1733534667546 (+2 ms)Processed 17 edits across 1 Regions in 23 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733534666978/wal.1733534666987, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1733534667546 2024-12-07T01:24:27,562 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=398 (was 393) Potentially hanging thread: 
sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:46646 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44328 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=939 (was 903) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 207), ProcessCount=11 (was 11), AvailableMemoryMB=7816 (was 7828) 2024-12-07T01:24:27,574 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=398, OpenFileDescriptor=939, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=7815 2024-12-07T01:24:27,589 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:27,591 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:27,593 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:27,597 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-44063082, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-44063082, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:27,611 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-44063082/hregion-44063082.1733534667598, exclude list is [], retry=0 2024-12-07T01:24:27,615 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:27,615 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:27,615 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:27,623 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-44063082/hregion-44063082.1733534667598 2024-12-07T01:24:27,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:27,624 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => e4a40c6b4408b4cd18b7509d688cdf6c, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741879_1056 (size=70) 2024-12-07T01:24:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741879_1056 (size=70) 2024-12-07T01:24:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741879_1056 (size=70) 2024-12-07T01:24:27,662 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:27,664 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,666 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName a 2024-12-07T01:24:27,666 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,666 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,666 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,668 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName b 2024-12-07T01:24:27,668 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,669 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,669 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,670 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName c 2024-12-07T01:24:27,670 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,671 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,671 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,672 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,672 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,674 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay 
for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,674 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,675 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:27,676 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,679 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:27,679 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e4a40c6b4408b4cd18b7509d688cdf6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62272504, jitterRate=-0.07206737995147705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:27,680 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e4a40c6b4408b4cd18b7509d688cdf6c: Writing region info on filesystem at 1733534667662Initializing all the Stores at 1733534667663 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667663Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667664 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667664Cleaning up temporary data from old regions at 1733534667674 (+10 ms)Region opened successfully at 1733534667680 (+6 ms) 2024-12-07T01:24:27,680 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e4a40c6b4408b4cd18b7509d688cdf6c, disabling compactions & flushes 2024-12-07T01:24:27,680 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:27,680 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:27,680 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 
after waiting 0 ms 2024-12-07T01:24:27,680 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:27,681 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:27,681 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e4a40c6b4408b4cd18b7509d688cdf6c: Waiting for close lock at 1733534667680Disabling compacts and flushes for region at 1733534667680Disabling writes for close at 1733534667680Writing region close event to WAL at 1733534667681 (+1 ms)Closed at 1733534667681 2024-12-07T01:24:27,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741878_1055 (size=95) 2024-12-07T01:24:27,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741878_1055 (size=95) 2024-12-07T01:24:27,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741878_1055 (size=95) 2024-12-07T01:24:27,687 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-44063082/hregion-44063082.1733534667598 not finished, retry = 0 2024-12-07T01:24:27,790 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:27,790 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-44063082:(num 1733534667598) 2024-12-07T01:24:27,790 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:27,793 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:27,805 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, exclude list is [], retry=0 2024-12-07T01:24:27,809 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:27,809 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:27,810 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:27,812 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 2024-12-07T01:24:27,812 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create 
new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:27,812 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e4a40c6b4408b4cd18b7509d688cdf6c, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:27,812 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:27,812 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,813 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,814 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,815 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName a 2024-12-07T01:24:27,815 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,816 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,816 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,817 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName b 2024-12-07T01:24:27,817 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,818 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,818 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,819 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName c 2024-12-07T01:24:27,819 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:27,819 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:27,819 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,820 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,821 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,822 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,822 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,823 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:27,824 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:27,825 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e4a40c6b4408b4cd18b7509d688cdf6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65294196, jitterRate=-0.02704066038131714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:27,826 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e4a40c6b4408b4cd18b7509d688cdf6c: Writing region info on filesystem at 1733534667813Initializing all the Stores at 1733534667814 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667814Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667814Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534667814Cleaning up temporary data from old regions at 1733534667822 (+8 ms)Region opened successfully at 1733534667825 (+3 ms) 2024-12-07T01:24:27,830 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733534667828/Put/seqid=0 2024-12-07T01:24:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741881_1058 (size=4826) 2024-12-07T01:24:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741881_1058 (size=4826) 2024-12-07T01:24:27,838 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42771/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in e4a40c6b4408b4cd18b7509d688cdf6c/a 2024-12-07T01:24:27,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741881_1058 (size=4826) 2024-12-07T01:24:27,845 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-07T01:24:27,845 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T01:24:27,846 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e4a40c6b4408b4cd18b7509d688cdf6c: 2024-12-07T01:24:27,848 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42771/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_ 2024-12-07T01:24:27,848 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42771/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into e4a40c6b4408b4cd18b7509d688cdf6c/a as hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_ - updating store file list. 2024-12-07T01:24:27,854 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:27,854 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_ into e4a40c6b4408b4cd18b7509d688cdf6c/a 2024-12-07T01:24:27,854 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42771/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into e4a40c6b4408b4cd18b7509d688cdf6c/a (new location: hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_) 2024-12-07T01:24:27,890 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, size=0 (0bytes) 2024-12-07T01:24:27,891 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 might be still open, length is 0 2024-12-07T01:24:27,891 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 2024-12-07T01:24:27,891 WARN [IPC Server handler 4 on default port 42771 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-12-07T01:24:27,891 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 after 0ms 2024-12-07T01:24:28,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:37330 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:38509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37330 dst: /127.0.0.1:38509 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38509 remote=/127.0.0.1:37330]. Total timeout mills is 60000, 59614 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:28,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44564 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:40681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44564 dst: /127.0.0.1:40681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
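The lease recovery attempt logged above (Failed to recover lease, attempt=0 on wal.1733534667793) and the DataXceiver write errors surrounding it are what happens when WAL splitting starts on a file whose writer never closed it: the NameNode has to revoke the old client's lease and finalize the last block before the splitter can trust the reported length, and the datanodes tear down the abandoned write pipeline in the process (recovery then succeeds on a later attempt a few seconds further down). A minimal sketch of that recovery loop, using only the public DistributedFileSystem.recoverLease API — HBase's own RecoverLeaseFSUtils layers backoff and progress reporting on top of something like this — is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Illustrative only: poll lease recovery on a still-open WAL before reading it.
    public final class LeaseRecoverySketch {
      public static void recoverLease(Configuration conf, Path walPath) throws Exception {
        FileSystem fs = walPath.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // nothing to recover on a local filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        int attempt = 0;
        // recoverLease() returns true once the NameNode has closed the file and
        // finalized the length of its last block; until then, keep retrying.
        while (!dfs.recoverLease(walPath)) {
          attempt++;
          Thread.sleep(1000L); // HBase uses a longer, configurable pause between attempts
        }
        System.out.println("Recovered lease on " + walPath + " after " + attempt + " retries");
      }
    }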
2024-12-07T01:24:28,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:46756 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46756 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:28,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741880_1059 (size=473) 2024-12-07T01:24:28,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741880_1059 (size=473) 2024-12-07T01:24:28,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741880_1059 (size=473) 2024-12-07T01:24:31,892 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 after 4001ms 2024-12-07T01:24:31,895 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:31,896 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 took 4005ms 2024-12-07T01:24:31,898 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793; continuing. 
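The "Initializing compression context ... hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ" entry above shows the reader setting up GZ value compression before replaying this WAL, which is the feature under test in TestAsyncWALReplayValueCompression. A hedged sketch of the configuration that produces such WALs follows; the property names are my reading of the WAL compression settings and should be verified against the HBase release in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: enable WAL dictionary compression plus GZ value compression.
    public final class WalValueCompressionConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Dictionary compression of WAL entries (tags ride along with this).
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Also compress cell values in the WAL; property names assumed, not verified.
        conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true);
        // "GZ" mirrors the valueCompressionType seen in the log; check the accepted spelling.
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }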
2024-12-07T01:24:31,898 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 so closing down 2024-12-07T01:24:31,898 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:31,900 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733534667793.temp 2024-12-07T01:24:31,901 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp 2024-12-07T01:24:31,902 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:31,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741882_1060 (size=259) 2024-12-07T01:24:31,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741882_1060 (size=259) 2024-12-07T01:24:31,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741882_1060 (size=259) 2024-12-07T01:24:31,910 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:31,912 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp to hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 2024-12-07T01:24:31,912 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 16 ms; skipped=1; WAL=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T01:24:31,912 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, journal: Splitting hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, size=0 (0bytes) at 1733534667890Finishing writing output for hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 so closing down at 1733534671898 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp at 1733534671901 (+3 ms)3 split writer threads finished at 1733534671902 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733534671910 (+8 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005-wal.1733534667793.temp to hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 at 1733534671912 (+2 ms)Processed 2 edits across 1 Regions in 16 ms; skipped=1; WAL=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793, size=0, length=0, corrupted=false, cancelled=false at 1733534671912 2024-12-07T01:24:31,914 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534667793 2024-12-07T01:24:31,915 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 2024-12-07T01:24:31,915 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:31,918 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:31,929 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534671918, exclude list is [], retry=0 2024-12-07T01:24:31,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:31,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:31,933 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:31,934 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534671918 2024-12-07T01:24:31,935 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:31,935 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e4a40c6b4408b4cd18b7509d688cdf6c, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:31,935 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:31,935 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,935 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,937 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,938 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName a 2024-12-07T01:24:31,938 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:31,948 DEBUG [StoreFileOpener-e4a40c6b4408b4cd18b7509d688cdf6c-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:31,948 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/bbd62f99d9494567a87aefcd89e3e6fd_SeqId_3_ 2024-12-07T01:24:31,949 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:31,949 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,950 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName b 2024-12-07T01:24:31,950 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:31,951 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:31,951 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,952 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4a40c6b4408b4cd18b7509d688cdf6c columnFamilyName c 2024-12-07T01:24:31,952 DEBUG [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:31,953 INFO [StoreOpener-e4a40c6b4408b4cd18b7509d688cdf6c-1 {}] regionserver.HStore(327): Store=e4a40c6b4408b4cd18b7509d688cdf6c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:31,953 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,954 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,956 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:31,956 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 2024-12-07T01:24:31,958 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:31,959 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 2024-12-07T01:24:31,959 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e4a40c6b4408b4cd18b7509d688cdf6c 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-07T01:24:31,973 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/.tmp/a/72d1fa7ba2064422ae89b6403b64905a is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733534667859/Put/seqid=0 2024-12-07T01:24:31,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741884_1062 (size=5149) 2024-12-07T01:24:31,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741884_1062 (size=5149) 2024-12-07T01:24:31,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741884_1062 (size=5149) 2024-12-07T01:24:31,982 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/.tmp/a/72d1fa7ba2064422ae89b6403b64905a 2024-12-07T01:24:31,988 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/.tmp/a/72d1fa7ba2064422ae89b6403b64905a as hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/72d1fa7ba2064422ae89b6403b64905a 2024-12-07T01:24:31,999 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/a/72d1fa7ba2064422ae89b6403b64905a, entries=1, sequenceid=5, filesize=5.0 K 2024-12-07T01:24:32,000 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for e4a40c6b4408b4cd18b7509d688cdf6c in 41ms, sequenceid=5, compaction requested=false; wal=null 2024-12-07T01:24:32,000 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/0000000000000000005 2024-12-07T01:24:32,002 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:32,002 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:32,003 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:32,004 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e4a40c6b4408b4cd18b7509d688cdf6c 2024-12-07T01:24:32,007 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e4a40c6b4408b4cd18b7509d688cdf6c/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-07T01:24:32,008 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e4a40c6b4408b4cd18b7509d688cdf6c; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74030747, jitterRate=0.1031440943479538}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:32,008 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e4a40c6b4408b4cd18b7509d688cdf6c: Writing region info on filesystem at 1733534671935Initializing all the Stores at 1733534671936 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534671937 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534671937Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534671937Obtaining lock to block concurrent updates at 1733534671959 (+22 ms)Preparing flush snapshotting stores in e4a40c6b4408b4cd18b7509d688cdf6c at 1733534671959Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733534671959Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 
at 1733534671959Flushing e4a40c6b4408b4cd18b7509d688cdf6c/a: creating writer at 1733534671959Flushing e4a40c6b4408b4cd18b7509d688cdf6c/a: appending metadata at 1733534671973 (+14 ms)Flushing e4a40c6b4408b4cd18b7509d688cdf6c/a: closing flushed file at 1733534671973Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52563e48: reopening flushed file at 1733534671987 (+14 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for e4a40c6b4408b4cd18b7509d688cdf6c in 41ms, sequenceid=5, compaction requested=false; wal=null at 1733534672000 (+13 ms)Cleaning up temporary data from old regions at 1733534672002 (+2 ms)Region opened successfully at 1733534672008 (+6 ms) 2024-12-07T01:24:32,012 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e4a40c6b4408b4cd18b7509d688cdf6c, disabling compactions & flushes 2024-12-07T01:24:32,012 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:32,012 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:32,012 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. after waiting 0 ms 2024-12-07T01:24:32,012 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 2024-12-07T01:24:32,013 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733534667590.e4a40c6b4408b4cd18b7509d688cdf6c. 
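[Editor's note] The test above (testRegionMadeOfBulkLoadedFilesOnly) replays a recovered.edits file whose values were written with tag and value compression (valueCompressionType=GZ). As a minimal, hedged sketch of the kind of configuration that enables WAL value compression: the first property name is documented for HBase WAL compression, while the tag/value compression property names below are recalled from the WAL value compression feature and are assumptions to verify against the HBase release in use; this is an illustration, not the test's actual setup code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalValueCompressionConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // WAL compression switch (documented HBase property).
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Assumed property names for tag and value compression -- verify for your version.
        conf.setBoolean("hbase.regionserver.wal.tags.enablecompression", true);
        conf.setBoolean("hbase.regionserver.wal.value.compression", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }
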
2024-12-07T01:24:32,013 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e4a40c6b4408b4cd18b7509d688cdf6c: Waiting for close lock at 1733534672012Disabling compacts and flushes for region at 1733534672012Disabling writes for close at 1733534672012Writing region close event to WAL at 1733534672013 (+1 ms)Closed at 1733534672013 2024-12-07T01:24:32,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741883_1061 (size=95) 2024-12-07T01:24:32,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741883_1061 (size=95) 2024-12-07T01:24:32,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741883_1061 (size=95) 2024-12-07T01:24:32,018 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:32,018 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733534671918) 2024-12-07T01:24:32,031 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=404 (was 398) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-141649434_22 at /127.0.0.1:40514 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42771 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:42771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-141649434_22 at /127.0.0.1:34852 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-141649434_22 at /127.0.0.1:37214 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=999 (was 939) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 207), ProcessCount=11 (was 11), AvailableMemoryMB=7820 (was 7815) - AvailableMemoryMB LEAK? 
- 2024-12-07T01:24:32,041 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=404, OpenFileDescriptor=999, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=7820 2024-12-07T01:24:32,056 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:32,060 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T01:24:32,064 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:32,066 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3e1286c5 2024-12-07T01:24:32,066 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T01:24:32,068 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58044, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T01:24:32,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T01:24:32,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-07T01:24:32,080 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T01:24:32,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-07T01:24:32,083 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:32,085 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T01:24:32,087 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T01:24:32,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741885_1063 (size=694) 2024-12-07T01:24:32,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741885_1063 (size=694) 2024-12-07T01:24:32,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741885_1063 (size=694) 2024-12-07T01:24:32,103 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee 2024-12-07T01:24:32,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741886_1064 (size=77) 2024-12-07T01:24:32,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741886_1064 (size=77) 2024-12-07T01:24:32,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741886_1064 (size=77) 2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:32,114 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,114 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,114 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534672114Disabling compacts and flushes for region at 1733534672114Disabling writes for close at 1733534672114Writing region close event to WAL at 1733534672114Closed at 1733534672114 2024-12-07T01:24:32,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T01:24:32,120 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733534672116"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733534672116"}]},"ts":"1733534672116"} 2024-12-07T01:24:32,124 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
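[Editor's note] The CreateTableProcedure above builds 'testReplayEditsAfterRegionMovedWithMultiCF' with REGION_REPLICATION => '1' and two column families (cf1, cf2) keeping one version per cell. A hedged client-side sketch of creating an equivalent table with the HBase Admin API (not the test's actual code; connection settings are assumed to come from the default configuration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMultiCfTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
          TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1"))
                  .setMaxVersions(1).build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf2"))
                  .setMaxVersions(1).build());
          // Blocks until the master's CreateTableProcedure (pid=4 above) completes.
          admin.createTable(builder.build());
        }
      }
    }
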
2024-12-07T01:24:32,127 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T01:24:32,130 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733534672127"}]},"ts":"1733534672127"} 2024-12-07T01:24:32,134 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-07T01:24:32,134 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ec1863dc21e5=0} racks are {/default-rack=0} 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T01:24:32,136 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T01:24:32,136 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T01:24:32,136 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T01:24:32,136 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T01:24:32,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN}] 2024-12-07T01:24:32,139 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN 2024-12-07T01:24:32,140 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN; state=OFFLINE, location=ec1863dc21e5,45349,1733534655753; forceNewPlan=false, retain=false 2024-12-07T01:24:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T01:24:32,294 INFO [ec1863dc21e5:40763 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
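[Editor's note] After the ASSIGN subprocedure above places the single region, a client can ask meta where it landed; the AsyncNonMetaRegionLocator fetch for row 'r1' later in this log is the asynchronous equivalent. A minimal hedged sketch using the synchronous RegionLocator API (illustrative only):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // Look up which region and RegionServer currently host row 'r1'.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"));
          System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
        }
      }
    }
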
2024-12-07T01:24:32,295 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPENING, regionLocation=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:32,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN because future has completed 2024-12-07T01:24:32,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753}] 2024-12-07T01:24:32,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T01:24:32,456 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T01:24:32,460 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42687, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T01:24:32,467 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,468 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:32,468 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,468 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:32,468 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,468 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,470 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,472 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:32,472 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:32,473 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:32,473 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,475 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:32,475 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:32,475 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:32,475 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,476 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,477 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,477 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,477 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,478 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-07T01:24:32,480 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,482 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:32,482 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4df18ab8d6a713905826e338f7d67d7c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59399669, jitterRate=-0.11487595736980438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T01:24:32,482 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,483 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4df18ab8d6a713905826e338f7d67d7c: Running coprocessor pre-open hook at 1733534672469Writing region info on filesystem at 1733534672469Initializing all the Stores at 1733534672470 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534672470Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534672470Cleaning up temporary data from old regions at 1733534672477 (+7 ms)Running coprocessor post-open hooks at 1733534672483 (+6 ms)Region opened successfully at 1733534672483 2024-12-07T01:24:32,485 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., pid=6, masterSystemTime=1733534672456 2024-12-07T01:24:32,487 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,487 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPEN, openSeqNum=2, regionLocation=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:32,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 because future has completed 2024-12-07T01:24:32,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T01:24:32,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 in 192 msec 2024-12-07T01:24:32,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T01:24:32,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN in 360 msec 2024-12-07T01:24:32,501 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T01:24:32,501 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733534672501"}]},"ts":"1733534672501"} 2024-12-07T01:24:32,503 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-07T01:24:32,505 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T01:24:32,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 431 msec 2024-12-07T01:24:32,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T01:24:32,720 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-07T01:24:32,721 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-12-07T01:24:32,722 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T01:24:32,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-07T01:24:32,729 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T01:24:32,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-07T01:24:32,745 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=2] 2024-12-07T01:24:32,746 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T01:24:32,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59954, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T01:24:32,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=4df18ab8d6a713905826e338f7d67d7c, source=ec1863dc21e5,45349,1733534655753, destination=ec1863dc21e5,45471,1733534655575, warming up region on ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:32,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T01:24:32,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=4df18ab8d6a713905826e338f7d67d7c, source=ec1863dc21e5,45349,1733534655753, destination=ec1863dc21e5,45471,1733534655575, running balancer 2024-12-07T01:24:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE 2024-12-07T01:24:32,766 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59413, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T01:24:32,766 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE 2024-12-07T01:24:32,769 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=CLOSING, regionLocation=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:32,771 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(7855): Warmup {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:32,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE because future has completed 2024-12-07T01:24:32,772 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,772 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T01:24:32,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753}] 2024-12-07T01:24:32,773 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:32,773 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:32,774 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:32,774 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,775 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:32,775 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:32,775 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:32,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534672776Disabling compacts and flushes for region at 1733534672776Disabling writes for close at 1733534672776Writing region close event to WAL at 1733534672776Closed at 1733534672776 2024-12-07T01:24:32,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-07T01:24:32,935 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,935 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-07T01:24:32,937 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:32,937 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,937 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,937 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:32,937 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
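[annotation] The entries above show the master handling a client-requested move of region 4df18ab8d6a713905826e338f7d67d7c: a TransitRegionStateProcedure (REOPEN/MOVE, pid=7) is stored, the region is warmed up on the destination server, and a CloseRegionProcedure is dispatched to the source. As a rough illustration only, a move like the one logged here can be requested through the HBase Admin API; the sketch below is not the test's actual code. The destination server name is copied from the log, everything else (class name, connection setup) is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MoveRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
      // The test table has a single region; look it up and ask the master to move it.
      RegionInfo region = admin.getRegions(table).get(0);
      // Destination server name as logged above (host,port,startcode).
      ServerName destination = ServerName.valueOf("ec1863dc21e5,45471,1733534655575");
      // The master runs a REOPEN/MOVE procedure: close on the source regionserver,
      // then open on the destination, as the surrounding log entries show.
      admin.move(region.getEncodedNameAsBytes(), destination);
    }
  }
}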
2024-12-07T01:24:32,937 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 4df18ab8d6a713905826e338f7d67d7c 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-07T01:24:32,955 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad is 35, key is r1/cf1:q/1733534672749/Put/seqid=0 2024-12-07T01:24:32,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741887_1065 (size=4783) 2024-12-07T01:24:32,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741887_1065 (size=4783) 2024-12-07T01:24:32,962 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad 2024-12-07T01:24:32,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741887_1065 (size=4783) 2024-12-07T01:24:32,970 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad 2024-12-07T01:24:32,975 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T01:24:32,976 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 4df18ab8d6a713905826e338f7d67d7c in 39ms, sequenceid=5, compaction requested=false 2024-12-07T01:24:32,976 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-07T01:24:32,982 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-07T01:24:32,984 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:32,984 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534672937Running coprocessor pre-close hooks at 1733534672937Disabling compacts and flushes for region at 1733534672937Disabling writes for close at 1733534672937Obtaining lock to block concurrent updates at 1733534672937Preparing flush snapshotting stores in 4df18ab8d6a713905826e338f7d67d7c at 1733534672937Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733534672938 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. at 1733534672939 (+1 ms)Flushing 4df18ab8d6a713905826e338f7d67d7c/cf1: creating writer at 1733534672939Flushing 4df18ab8d6a713905826e338f7d67d7c/cf1: appending metadata at 1733534672955 (+16 ms)Flushing 4df18ab8d6a713905826e338f7d67d7c/cf1: closing flushed file at 1733534672955Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50569139: reopening flushed file at 1733534672969 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 4df18ab8d6a713905826e338f7d67d7c in 39ms, sequenceid=5, compaction requested=false at 1733534672976 (+7 ms)Writing region close event to WAL at 1733534672978 (+2 ms)Running coprocessor post-close hooks at 1733534672982 (+4 ms)Closed at 1733534672984 (+2 ms) 2024-12-07T01:24:32,985 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 4df18ab8d6a713905826e338f7d67d7c move to ec1863dc21e5,45471,1733534655575 record at close sequenceid=5 2024-12-07T01:24:32,988 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:32,989 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=CLOSED 2024-12-07T01:24:32,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 because future has completed 2024-12-07T01:24:32,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T01:24:32,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 in 221 msec 2024-12-07T01:24:32,999 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE; state=CLOSED, 
location=ec1863dc21e5,45471,1733534655575; forceNewPlan=false, retain=false 2024-12-07T01:24:33,149 INFO [ec1863dc21e5:40763 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T01:24:33,150 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPENING, regionLocation=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:33,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE because future has completed 2024-12-07T01:24:33,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575}] 2024-12-07T01:24:33,312 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,312 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:33,312 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,313 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:33,313 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,313 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,315 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,316 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:33,316 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:33,323 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad 2024-12-07T01:24:33,323 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:33,324 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,324 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:33,325 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:33,325 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:33,325 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,326 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,327 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,328 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,328 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,329 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-07T01:24:33,330 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,331 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 4df18ab8d6a713905826e338f7d67d7c; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60121597, jitterRate=-0.10411839187145233}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T01:24:33,331 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,332 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 4df18ab8d6a713905826e338f7d67d7c: Running coprocessor pre-open hook at 1733534673313Writing region info on filesystem at 1733534673313Initializing all the Stores at 1733534673315 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534673315Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534673315Cleaning up temporary data from old regions at 1733534673328 (+13 ms)Running coprocessor post-open hooks at 1733534673331 (+3 ms)Region opened successfully at 1733534673332 (+1 ms) 2024-12-07T01:24:33,390 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., pid=9, masterSystemTime=1733534673306 2024-12-07T01:24:33,394 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task 
for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,394 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,395 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPEN, openSeqNum=9, regionLocation=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:33,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 because future has completed 2024-12-07T01:24:33,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-07T01:24:33,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 in 248 msec 2024-12-07T01:24:33,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE in 639 msec 2024-12-07T01:24:33,422 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T01:24:33,424 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T01:24:33,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.3:59954 deadline: 1733534733428, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45471 startCode=1733534655575. As of locationSeqNum=5. 2024-12-07T01:24:33,457 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45471 startCode=1733534655575. As of locationSeqNum=5. 2024-12-07T01:24:33,458 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45471 startCode=1733534655575. As of locationSeqNum=5. 
2024-12-07T01:24:33,458 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45471 startCode=1733534655575. As of locationSeqNum=5. 2024-12-07T01:24:33,569 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T01:24:33,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T01:24:33,580 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4df18ab8d6a713905826e338f7d67d7c 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-07T01:24:33,597 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/d7775f9480ce4d5a85e49c6f049979e8 is 29, key is r1/cf1:/1733534673572/DeleteFamily/seqid=0 2024-12-07T01:24:33,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741888_1066 (size=4906) 2024-12-07T01:24:33,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741888_1066 (size=4906) 2024-12-07T01:24:33,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741888_1066 (size=4906) 2024-12-07T01:24:33,607 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,613 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,628 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf2/25faa49aa30445a08f70491e4b413eb9 is 29, key is r1/cf2:/1733534673572/DeleteFamily/seqid=0 2024-12-07T01:24:33,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741889_1067 (size=4906) 2024-12-07T01:24:33,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741889_1067 (size=4906) 2024-12-07T01:24:33,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741889_1067 (size=4906) 
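[annotation] The RegionMovedException entries above show the client side of the move: the cached location (ec1863dc21e5,45349, seqNum=2) is rejected by the old server, and AsyncRegionLocatorHelper replaces it with the new location (ec1863dc21e5,45471, seqNum=5) before retrying. From application code this is invisible; a write against the moved region looks like the minimal sketch below. Row, family and qualifier (r1, cf1:q) are taken from the log; the value bytes and class name are made up for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAfterMoveSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // Same row/family/qualifier as in the log (r1, cf1:q); the value is illustrative.
      Put put = new Put(Bytes.toBytes("r1"));
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // If the region has moved since the location was cached, the old server answers
      // with RegionMovedException; the client refreshes its location cache and retries
      // against the new server without surfacing the exception to the caller.
      table.put(put);
    }
  }
}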
2024-12-07T01:24:33,635 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf2/25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,640 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,642 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/d7775f9480ce4d5a85e49c6f049979e8 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,648 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,648 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8, entries=1, sequenceid=12, filesize=4.8 K 2024-12-07T01:24:33,649 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf2/25faa49aa30445a08f70491e4b413eb9 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,658 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,659 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9, entries=1, sequenceid=12, filesize=4.8 K 2024-12-07T01:24:33,660 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 4df18ab8d6a713905826e338f7d67d7c in 80ms, sequenceid=12, compaction requested=false 2024-12-07T01:24:33,660 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4df18ab8d6a713905826e338f7d67d7c: 2024-12-07T01:24:33,662 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T01:24:33,663 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4df18ab8d6a713905826e338f7d67d7c/cf1 is initiating major compaction (all files) 2024-12-07T01:24:33,664 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T01:24:33,664 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:33,664 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4df18ab8d6a713905826e338f7d67d7c/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,665 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad, hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8] into tmpdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp, totalSize=9.5 K 2024-12-07T01:24:33,666 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b2b4f4df1d1548a2afb2478ce9a5a5ad, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733534672749 2024-12-07T01:24:33,666 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d7775f9480ce4d5a85e49c6f049979e8, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-07T01:24:33,678 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4df18ab8d6a713905826e338f7d67d7c#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T01:24:33,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741890_1068 (size=4626) 2024-12-07T01:24:33,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741890_1068 (size=4626) 2024-12-07T01:24:33,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741890_1068 (size=4626) 2024-12-07T01:24:33,694 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf1/cd65736c10b446eabe28864dc5b25ae7 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/cd65736c10b446eabe28864dc5b25ae7 2024-12-07T01:24:33,708 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 4df18ab8d6a713905826e338f7d67d7c/cf1 of 4df18ab8d6a713905826e338f7d67d7c into cd65736c10b446eabe28864dc5b25ae7(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
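[annotation] The flush at sequenceid=12 and the major compaction of 4df18ab8d6a713905826e338f7d67d7c/cf1 into cd65736c10b446eabe28864dc5b25ae7 above are driven directly by the test thread against the region. Outside a test, the closest equivalent is the pair of Admin calls sketched below; this is a hedged illustration, not the test's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush the memstores of both column families to new HFiles.
      admin.flush(table);
      // Request a major compaction: all store files of each family are rewritten into a
      // single file per family, and the replaced files become eligible for archiving.
      admin.majorCompact(table);
    }
  }
}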
2024-12-07T01:24:33,708 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4df18ab8d6a713905826e338f7d67d7c: 2024-12-07T01:24:33,709 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-07T01:24:33,709 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4df18ab8d6a713905826e338f7d67d7c/cf2 is initiating major compaction (all files) 2024-12-07T01:24:33,709 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T01:24:33,709 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T01:24:33,709 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4df18ab8d6a713905826e338f7d67d7c/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,709 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9] into tmpdir=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp, totalSize=4.8 K 2024-12-07T01:24:33,710 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 25faa49aa30445a08f70491e4b413eb9, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-07T01:24:33,715 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4df18ab8d6a713905826e338f7d67d7c#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T01:24:33,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741891_1069 (size=4592) 2024-12-07T01:24:33,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741891_1069 (size=4592) 2024-12-07T01:24:33,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741891_1069 (size=4592) 2024-12-07T01:24:33,729 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/.tmp/cf2/1c6d273452ce499da738dba6b1c10d07 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/1c6d273452ce499da738dba6b1c10d07 2024-12-07T01:24:33,740 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 4df18ab8d6a713905826e338f7d67d7c/cf2 of 4df18ab8d6a713905826e338f7d67d7c into 1c6d273452ce499da738dba6b1c10d07(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T01:24:33,740 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4df18ab8d6a713905826e338f7d67d7c: 2024-12-07T01:24:33,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=4df18ab8d6a713905826e338f7d67d7c, source=ec1863dc21e5,45471,1733534655575, destination=ec1863dc21e5,45349,1733534655753, warming up region on ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:33,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=4df18ab8d6a713905826e338f7d67d7c, source=ec1863dc21e5,45471,1733534655575, destination=ec1863dc21e5,45349,1733534655753, running balancer 2024-12-07T01:24:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE 2024-12-07T01:24:33,746 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE 2024-12-07T01:24:33,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(7855): Warmup {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:33,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:33,748 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=CLOSING, regionLocation=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:33,748 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,750 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:33,750 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:33,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE because future has completed 2024-12-07T01:24:33,752 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T01:24:33,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575}] 2024-12-07T01:24:33,764 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad 2024-12-07T01:24:33,769 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/cd65736c10b446eabe28864dc5b25ae7 2024-12-07T01:24:33,775 INFO [StoreFileOpener-4df18ab8d6a713905826e338f7d67d7c-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,776 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,776 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:33,776 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,777 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:33,777 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:33,785 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/1c6d273452ce499da738dba6b1c10d07 2024-12-07T01:24:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-07T01:24:33,797 INFO [StoreFileOpener-4df18ab8d6a713905826e338f7d67d7c-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,797 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,798 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:33,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45349 {}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534673798Disabling compacts and flushes for region at 1733534673798Disabling writes for close at 1733534673798Writing region close event to WAL at 1733534673799 (+1 ms)Closed at 1733534673799 2024-12-07T01:24:33,907 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,908 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-07T01:24:33,908 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:33,908 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,908 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,908 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:33,908 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:33,909 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad, hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8] to archive 2024-12-07T01:24:33,912 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T01:24:33,916 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/b2b4f4df1d1548a2afb2478ce9a5a5ad 2024-12-07T01:24:33,918 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8 to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/d7775f9480ce4d5a85e49c6f049979e8 2024-12-07T01:24:33,934 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9] to archive 2024-12-07T01:24:33,935 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T01:24:33,937 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9 to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/25faa49aa30445a08f70491e4b413eb9 2024-12-07T01:24:33,942 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-07T01:24:33,943 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
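[annotation] The HFileArchiver entries above move the compacted-away store files (b2b4f4df1d1548a2afb2478ce9a5a5ad, d7775f9480ce4d5a85e49c6f049979e8, 25faa49aa30445a08f70491e4b413eb9) from the region's data directory to the matching path under archive/ before the region closes with recovered.edits/17.seqid. Something like the sketch below can be used to see what ended up in the archive; the NameNode address and test-data directory are taken from the log, the class name and traversal are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    // Archive location for this table, as logged by HFileArchiver above.
    Path archivedTable = new Path(
        "hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee"
        + "/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF");
    FileSystem fs = archivedTable.getFileSystem(new Configuration());
    // Layout under the table directory is <region>/<family>/<hfile>.
    for (FileStatus region : fs.listStatus(archivedTable)) {
      for (FileStatus family : fs.listStatus(region.getPath())) {
        for (FileStatus file : fs.listStatus(family.getPath())) {
          System.out.println(file.getPath());
        }
      }
    }
  }
}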
2024-12-07T01:24:33,943 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534673908Running coprocessor pre-close hooks at 1733534673908Disabling compacts and flushes for region at 1733534673908Disabling writes for close at 1733534673908Writing region close event to WAL at 1733534673939 (+31 ms)Running coprocessor post-close hooks at 1733534673943 (+4 ms)Closed at 1733534673943 2024-12-07T01:24:33,943 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 4df18ab8d6a713905826e338f7d67d7c move to ec1863dc21e5,45349,1733534655753 record at close sequenceid=12 2024-12-07T01:24:33,946 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:33,947 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=CLOSED 2024-12-07T01:24:33,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 because future has completed 2024-12-07T01:24:33,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-07T01:24:33,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 in 199 msec 2024-12-07T01:24:33,955 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE; state=CLOSED, location=ec1863dc21e5,45349,1733534655753; forceNewPlan=false, retain=false 2024-12-07T01:24:34,106 INFO [ec1863dc21e5:40763 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T01:24:34,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPENING, regionLocation=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:34,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE because future has completed 2024-12-07T01:24:34,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753}] 2024-12-07T01:24:34,269 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
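[annotation] The second move follows the same pattern as the first: the region is closed on ec1863dc21e5,45471 with a move record at close sequenceid=12, then an OpenRegionProcedure (pid=12) opens it on ec1863dc21e5,45349. A caller that wants to confirm where the region ended up can force a fresh lookup through the RegionLocator, roughly as sketched below; this is an illustration, not part of the test, and the row key r1 is reused from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // reload=true bypasses the client cache and asks hbase:meta for the current location.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("r1"), true);
      System.out.println(location.getServerName() + " seqNum=" + location.getSeqNum());
    }
  }
}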
2024-12-07T01:24:34,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:34,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:34,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,272 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,273 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:34,273 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:34,280 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/cd65736c10b446eabe28864dc5b25ae7 2024-12-07T01:24:34,280 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:34,280 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,281 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:34,281 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:34,288 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/1c6d273452ce499da738dba6b1c10d07 2024-12-07T01:24:34,288 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:34,288 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,289 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,290 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,291 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,291 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,291 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
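The FlushLargeStoresPolicy line above states its fallback: with no explicit hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the per-family lower bound is the region memstore flush size divided by the number of families, which for the two families cf1/cf2 here comes out as the "64.0 M" shown. A one-line check of that arithmetic (the 128 MB flush size is an assumption about this test's configuration):

public class FlushLowerBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // assumed region flush size
    int families = 2;                            // cf1 and cf2
    // 134217728 / 2 = 67108864 bytes, i.e. the 64.0 M reported in the log
    System.out.println(memstoreFlushSize / families + " bytes per family");
  }
}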
2024-12-07T01:24:34,293 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,294 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 4df18ab8d6a713905826e338f7d67d7c; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65892509, jitterRate=-0.018125101923942566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T01:24:34,294 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,294 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 4df18ab8d6a713905826e338f7d67d7c: Running coprocessor pre-open hook at 1733534674270Writing region info on filesystem at 1733534674270Initializing all the Stores at 1733534674271 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534674272 (+1 ms)Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534674272Cleaning up temporary data from old regions at 1733534674291 (+19 ms)Running coprocessor post-open hooks at 1733534674294 (+3 ms)Region opened successfully at 1733534674294 2024-12-07T01:24:34,295 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., pid=12, masterSystemTime=1733534674266 2024-12-07T01:24:34,298 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:34,298 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
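The open message above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65892509, jitterRate=-0.018125101923942566}. Those two numbers are consistent with a 64 MB base size scaled by (1 + jitterRate); the sketch below only re-derives that figure, assuming this is how the jitter is applied in this run:

public class SplitSizeJitter {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024;              // 67108864, assumed configured max file size
    double jitterRate = -0.018125101923942566;  // value from the log line above
    long desired = Math.round(base * (1.0 + jitterRate));
    System.out.println(desired);                // prints 65892509, matching the log
  }
}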
2024-12-07T01:24:34,299 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPEN, openSeqNum=18, regionLocation=ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:34,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 because future has completed 2024-12-07T01:24:34,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-07T01:24:34,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45349,1733534655753 in 192 msec 2024-12-07T01:24:34,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, REOPEN/MOVE in 562 msec 2024-12-07T01:24:34,348 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T01:24:34,350 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T01:24:34,352 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server ec1863dc21e5,45349,1733534655753: testing ***** 2024-12-07T01:24:34,352 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-07T01:24:34,354 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-07T01:24:34,356 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-07T01:24:34,358 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-07T01:24:34,359 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-07T01:24:34,366 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 357918208 }, "NonHeapMemoryUsage": { "committed": 171376640, "init": 7667712, "max": -1, "used": 168781536 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "ec1863dc21e5", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, 
"numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2071, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 1, "ProcessCallTime_max": 26, "ProcessCallTime_mean": 6, "ProcessCallTime_25th_percentile": 7, "ProcessCallTime_median": 13, "ProcessCallTime_75th_percentile": 19, "ProcessCallTime_90th_percentile": 23, "ProcessCallTime_95th_percentile": 24, "ProcessCallTime_98th_percentile": 25, "ProcessCallTime_99th_percentile": 25, "ProcessCallTime_99.9th_percentile": 25, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 0, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 26, "TotalCallTime_mean": 6, "TotalCallTime_25th_percentile": 7, "TotalCallTime_median": 13, "TotalCallTime_75th_percentile": 19, "TotalCallTime_90th_percentile": 23, "TotalCallTime_95th_percentile": 24, "TotalCallTime_98th_percentile": 25, "TotalCallTime_99th_percentile": 25, "TotalCallTime_99.9th_percentile": 25, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 175, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 131, "ResponseSize_90th_percentile": 157, "ResponseSize_95th_percentile": 166, "ResponseSize_98th_percentile": 171, "ResponseSize_99th_percentile": 173, "ResponseSize_99.9th_percentile": 174, "ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, 
"RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 352 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "ec1863dc21e5", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:59844", "tag.serverName": "ec1863dc21e5,45471,1733534655575", "tag.clusterId": "80ce8d20-b7d8-4cb9-81e8-162bac6b070d", "tag.Context": "regionserver", "tag.Hostname": "ec1863dc21e5", "regionCount": 0, "storeCount": 0, "hlogFileCount": 1, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733534655575, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, 
"l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, 
"ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, 
"Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, 
"Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, 
"CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-07T01:24:34,370 WARN 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40763 {}] master.MasterRpcServices(700): ec1863dc21e5,45349,1733534655753 reported a fatal error: ***** ABORTING region server ec1863dc21e5,45349,1733534655753: testing ***** 2024-12-07T01:24:34,373 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ec1863dc21e5,45349,1733534655753' ***** 2024-12-07T01:24:34,373 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-07T01:24:34,373 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T01:24:34,374 INFO [RS:1;ec1863dc21e5:45349 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-07T01:24:34,374 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T01:24:34,374 INFO [RS:1;ec1863dc21e5:45349 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-07T01:24:34,374 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(3091): Received CLOSE for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,374 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(956): aborting server ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:34,374 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T01:24:34,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45471 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.3:45796 deadline: 1733534734374, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45349 startCode=1733534655753. As of locationSeqNum=12. 2024-12-07T01:24:34,375 INFO [RS:1;ec1863dc21e5:45349 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ec1863dc21e5:45349. 
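The abort handler dumps a JSON snapshot of the java.lang:type=Memory and RegionServer MBeans (the long block above). The heap figures in that dump come from the platform memory MXBean; a minimal, unrelated-to-HBase way to read the same values looks like this:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class MemoryBeanSketch {
  public static void main(String[] args) {
    MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    // Same fields as the "java.lang:type=Memory" bean in the abort dump above.
    System.out.println("committed=" + heap.getCommitted()
        + " init=" + heap.getInit()
        + " max=" + heap.getMax()
        + " used=" + heap.getUsed());
  }
}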
2024-12-07T01:24:34,375 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:34,375 DEBUG [RS:1;ec1863dc21e5:45349 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T01:24:34,375 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:34,375 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:34,375 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:34,375 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:34,375 DEBUG [RS:1;ec1863dc21e5:45349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:34,375 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45349 startCode=1733534655753. As of locationSeqNum=12. 
2024-12-07T01:24:34,375 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T01:24:34,375 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45349 startCode=1733534655753. As of locationSeqNum=12. 2024-12-07T01:24:34,376 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1325): Online Regions={4df18ab8d6a713905826e338f7d67d7c=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.} 2024-12-07T01:24:34,376 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ec1863dc21e5 port=45349 startCode=1733534655753. As of locationSeqNum=12. 2024-12-07T01:24:34,376 DEBUG [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1351): Waiting on 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:34,381 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:34,381 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534674374Running coprocessor pre-close hooks at 1733534674375 (+1 ms)Disabling compacts and flushes for region at 1733534674375Disabling writes for close at 1733534674375Writing region close event to WAL at 1733534674381 (+6 ms)Running coprocessor post-close hooks at 1733534674381Closed at 1733534674381 2024-12-07T01:24:34,382 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
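The aborting server above reports "Waiting on 1 regions to close" together with its Online Regions map and then loops until the map is empty. A rough sketch of that bookkeeping, assuming a simple map keyed by encoded region name (close() here is a stand-in for the real close handler, and String stands in for the region object):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class WaitOnRegions {
  static final Map<String, String> onlineRegions = new ConcurrentHashMap<>();

  static void close(String encodedName) {
    // The real handler writes the close event to the WAL before removing the region.
    onlineRegions.remove(encodedName);
  }

  public static void main(String[] args) throws InterruptedException {
    onlineRegions.put("4df18ab8d6a713905826e338f7d67d7c", "testReplayEdits... region");
    onlineRegions.keySet().forEach(WaitOnRegions::close);    // close each online region
    while (!onlineRegions.isEmpty()) {                        // "Waiting on N regions to close"
      System.out.println("Waiting on " + onlineRegions.size() + " regions to close");
      Thread.sleep(100);
    }
    System.out.println("all regions closed");
  }
}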
2024-12-07T01:24:34,467 INFO [regionserver/ec1863dc21e5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:34,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server ec1863dc21e5,45349,1733534655753 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:34,491 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server ec1863dc21e5,45349,1733534655753 aborting 2024-12-07T01:24:34,492 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server ec1863dc21e5,45349,1733534655753 aborting 2024-12-07T01:24:34,492 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=12 from cache 2024-12-07T01:24:34,574 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-07T01:24:34,574 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:34,576 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(976): stopping server ec1863dc21e5,45349,1733534655753; all regions closed. 
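The AsyncRegionLocatorHelper lines show two different reactions in the client's location cache: a RegionMovedException (earlier) carries the new host/port/seqNum, so the cached entry is replaced only when the reported seqNum is newer, while the RegionServerAbortedException here carries no replacement, so the entry is simply dropped. A hedged sketch of that decision; the record type and cache map are illustrative, not the HBase client classes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LocationCacheSketch {
  record Loc(String host, int port, long seqNum) {}
  static final Map<String, Loc> cache = new ConcurrentHashMap<>();

  // RegionMovedException path: only accept a strictly newer location.
  static void onRegionMoved(String region, Loc fromException) {
    cache.merge(region, fromException,
        (old, fresh) -> fresh.seqNum() > old.seqNum() ? fresh : old);
  }

  // RegionServerAbortedException / connection-refused path: no replacement known.
  static void onServerGone(String region) {
    cache.remove(region);
  }

  public static void main(String[] args) {
    cache.put("4df18ab8...", new Loc("ec1863dc21e5", 45471, 5));
    onRegionMoved("4df18ab8...", new Loc("ec1863dc21e5", 45349, 12)); // 12 > 5, so updated
    System.out.println(cache.get("4df18ab8..."));
    onServerGone("4df18ab8...");                                      // removed from cache
    System.out.println(cache.get("4df18ab8..."));                     // null
  }
}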
2024-12-07T01:24:34,576 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-07T01:24:34,576 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:34,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741833_1009 (size=1407) 2024-12-07T01:24:34,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741833_1009 (size=1407) 2024-12-07T01:24:34,581 DEBUG [RS:1;ec1863dc21e5:45349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:34,581 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:34,581 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T01:24:34,581 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.ChoreService(370): Chore service for: regionserver/ec1863dc21e5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T01:24:34,581 INFO [regionserver/ec1863dc21e5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T01:24:34,581 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T01:24:34,582 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T01:24:34,582 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
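The ChoreService shutdown line above lists the scheduled chores still registered on the server (CompactionThroughputTuner every 60000 ms, the replication statistics chores every 300000 ms). A chore service of this kind behaves much like a shared ScheduledExecutorService; a minimal sketch, with the period copied from the log and everything else illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    chores.scheduleAtFixedRate(
        () -> System.out.println("CompactionThroughputTuner tick"),
        0, 60_000, TimeUnit.MILLISECONDS);     // period=60000, unit=MILLISECONDS
    Thread.sleep(1_000);
    chores.shutdown();                         // "Shutdown chores and chore service"
    chores.awaitTermination(5, TimeUnit.SECONDS);
  }
}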
2024-12-07T01:24:34,582 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T01:24:34,582 INFO [RS:1;ec1863dc21e5:45349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45349 2024-12-07T01:24:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T01:24:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ec1863dc21e5,45349,1733534655753 2024-12-07T01:24:34,657 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T01:24:34,668 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ec1863dc21e5,45349,1733534655753] 2024-12-07T01:24:34,677 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ec1863dc21e5,45349,1733534655753 already deleted, retry=false 2024-12-07T01:24:34,677 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of ec1863dc21e5,45349,1733534655753 on ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:34,684 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure ec1863dc21e5,45349,1733534655753, splitWal=true, meta=false 2024-12-07T01:24:34,688 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for ec1863dc21e5,45349,1733534655753 (carryingMeta=false) ec1863dc21e5,45349,1733534655753/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@27979b8f[Write locks = 1, Read locks = 0], oldState=ONLINE. 
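The RegionServerTracker entry above reacts to the NodeChildrenChanged/NodeDeleted events on /hbase/rs: the aborted server's ephemeral znode disappears, and the master schedules a ServerCrashProcedure for it. A minimal sketch of watching that children list with the plain ZooKeeper client; the quorum string and znode path are copied from the log, the rest is an assumption about how such a tracker could be wired:

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsTrackerSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59844", 30000, event -> {});
    Set<String> known = new HashSet<>(zk.getChildren("/hbase/rs", false));

    Watcher watcher = new Watcher() {
      @Override public void process(WatchedEvent event) {
        try {
          List<String> now = zk.getChildren("/hbase/rs", this);   // re-arm the watch
          for (String server : known) {
            if (!now.contains(server)) {
              // ephemeral node gone -> treat the server as crashed
              System.out.println("processing expiration " + server);
            }
          }
          known.clear();
          known.addAll(now);
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    };
    zk.getChildren("/hbase/rs", watcher);   // initial watch registration
    Thread.sleep(Long.MAX_VALUE);
  }
}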
2024-12-07T01:24:34,689 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure ec1863dc21e5,45349,1733534655753, splitWal=true, meta=false 2024-12-07T01:24:34,691 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(207): ec1863dc21e5,45349,1733534655753 had 1 regions 2024-12-07T01:24:34,693 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure ec1863dc21e5,45349,1733534655753, splitWal=true, meta=false, isMeta: false 2024-12-07T01:24:34,694 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting 2024-12-07T01:24:34,696 INFO [PEWorker-1 {}] master.SplitWALManager(105): ec1863dc21e5,45349,1733534655753 WAL count=1, meta=false 2024-12-07T01:24:34,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure ec1863dc21e5%2C45349%2C1733534655753.1733534658407}] 2024-12-07T01:24:34,703 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18] 2024-12-07T01:24:34,703 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:34,704 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server ec1863dc21e5:45349 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: ec1863dc21e5/172.17.0.3:45349 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:34,705 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18, error=java.net.ConnectException: Call to address=ec1863dc21e5:45349 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: ec1863dc21e5/172.17.0.3:45349 2024-12-07T01:24:34,705 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 is java.net.ConnectException: Connection refused 2024-12-07T01:24:34,705 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 from cache 2024-12-07T01:24:34,705 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address ec1863dc21e5:45349 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: ec1863dc21e5/172.17.0.3:45349 2024-12-07T01:24:34,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure ec1863dc21e5%2C45349%2C1733534655753.1733534658407, worker=ec1863dc21e5,45471,1733534655575}] 2024-12-07T01:24:34,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:34,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45349-0x101ad6397820002, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:34,768 INFO [RS:1;ec1863dc21e5:45349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T01:24:34,768 INFO [RS:1;ec1863dc21e5:45349 {}] regionserver.HRegionServer(1031): Exiting; stopping=ec1863dc21e5,45349,1733534655753; zookeeper connection closed. 
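The client's cached location for region 4df18ab8d6a713905826e338f7d67d7c still points at the dead server, so the connection attempt is refused, the stale entry is dropped from the location cache, and ec1863dc21e5:45349 is added to the failed-servers list. A minimal sketch of forcing the same refresh from application code with the public client API (table name and row taken from this test; connection configuration assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RefreshRegionLocation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // reload=true bypasses the cached entry and re-reads hbase:meta,
      // which is what the retry path in AsyncRegionLocatorHelper ends up doing here.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
      System.out.println("row r1 is served by " + loc.getServerName());
    }
  }
}
```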
2024-12-07T01:24:34,769 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f0fe0e3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f0fe0e3 2024-12-07T01:24:34,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45471 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-07T01:24:34,888 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, size=1.4 K (1407bytes) 2024-12-07T01:24:34,888 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 2024-12-07T01:24:34,889 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 after 1ms 2024-12-07T01:24:34,892 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:34,892 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 took 4ms 2024-12-07T01:24:34,900 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 4df18ab8d6a713905826e338f7d67d7c: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-07T01:24:34,901 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 so closing down 2024-12-07T01:24:34,901 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:34,901 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:34,901 INFO [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions 
in 9 ms; skipped=6; WAL=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, size=1.4 K, length=1407, corrupted=false, cancelled=false 2024-12-07T01:24:34,901 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, journal: Splitting hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, size=1.4 K (1407bytes) at 1733534674888Finishing writing output for hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 so closing down at 1733534674901 (+13 ms)3 split writer threads finished at 1733534674901Processed 6 edits across 0 Regions in 9 ms; skipped=6; WAL=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407, size=1.4 K, length=1407, corrupted=false, cancelled=false at 1733534674901 2024-12-07T01:24:34,901 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 2024-12-07T01:24:34,904 DEBUG [RS_LOG_REPLAY_OPS-regionserver/ec1863dc21e5:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-07T01:24:34,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40763 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-07T01:24:34,911 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting/ec1863dc21e5%2C45349%2C1733534655753.1733534658407 to hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs 2024-12-07T01:24:34,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-07T01:24:34,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure ec1863dc21e5%2C45349%2C1733534655753.1733534658407, worker=ec1863dc21e5,45471,1733534655575 in 205 msec 2024-12-07T01:24:34,915 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:34,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-07T01:24:34,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure ec1863dc21e5%2C45349%2C1733534655753.1733534658407, worker=ec1863dc21e5,45471,1733534655575 in 219 msec 2024-12-07T01:24:34,920 INFO [PEWorker-1 {}] master.SplitLogManager(171): 
hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting dir is empty, no logs to split. 2024-12-07T01:24:34,920 INFO [PEWorker-1 {}] master.SplitWALManager(105): ec1863dc21e5,45349,1733534655753 WAL count=0, meta=false 2024-12-07T01:24:34,920 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if ec1863dc21e5,45349,1733534655753 WAL splitting is done? wals=0, meta=false 2024-12-07T01:24:34,925 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for ec1863dc21e5,45349,1733534655753 failed, ignore...File hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/WALs/ec1863dc21e5,45349,1733534655753-splitting does not exist. 2024-12-07T01:24:34,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN}] 2024-12-07T01:24:34,929 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN 2024-12-07T01:24:34,932 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-07T01:24:35,013 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18] 2024-12-07T01:24:35,014 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to ec1863dc21e5:45349 this server is in the failed servers list 2024-12-07T01:24:35,014 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=ec1863dc21e5:45349 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: ec1863dc21e5:45349 2024-12-07T01:24:35,015 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: ec1863dc21e5:45349 2024-12-07T01:24:35,015 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45349,1733534655753, seqNum=18 from cache 2024-12-07T01:24:35,082 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(204): Hosts are {ec1863dc21e5=0} racks are {/default-rack=0} 2024-12-07T01:24:35,082 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T01:24:35,082 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T01:24:35,082 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T01:24:35,082 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T01:24:35,082 INFO [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T01:24:35,082 INFO [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T01:24:35,083 DEBUG [ec1863dc21e5:40763 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T01:24:35,083 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPENING, regionLocation=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN because future has completed 2024-12-07T01:24:35,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575}] 2024-12-07T01:24:35,245 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
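Once the crashed server's single WAL has been split (all six edits were skipped because their sequence ids are at or below the last flushed sequence id 12), the file is archived to oldWALs and the empty -splitting directory is cleaned up. A hedged sketch of inspecting that archive directory with the plain Hadoop FileSystem API (the namenode address and path are copied from this log and belong to this test run only):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42771"), conf);
    // Split WALs are moved here (see the WALSplitUtil "Moved ... to ... oldWALs" entry above).
    Path oldWals = new Path("/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs");
    for (FileStatus stat : fs.listStatus(oldWals)) {
      System.out.println(stat.getPath().getName() + " (" + stat.getLen() + " bytes)");
    }
  }
}
```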
2024-12-07T01:24:35,245 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 4df18ab8d6a713905826e338f7d67d7c, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:35,246 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,246 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:35,246 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,246 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,248 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,250 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf1 2024-12-07T01:24:35,250 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,261 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf1/cd65736c10b446eabe28864dc5b25ae7 2024-12-07T01:24:35,261 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,261 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,262 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4df18ab8d6a713905826e338f7d67d7c columnFamilyName cf2 2024-12-07T01:24:35,263 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,269 DEBUG [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/cf2/1c6d273452ce499da738dba6b1c10d07 2024-12-07T01:24:35,269 INFO [StoreOpener-4df18ab8d6a713905826e338f7d67d7c-1 {}] regionserver.HStore(327): Store=4df18ab8d6a713905826e338f7d67d7c/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,269 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,270 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,271 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,272 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,272 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,272 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
2024-12-07T01:24:35,274 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,274 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 4df18ab8d6a713905826e338f7d67d7c; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63451030, jitterRate=-0.05450597405433655}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T01:24:35,275 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:35,275 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 4df18ab8d6a713905826e338f7d67d7c: Running coprocessor pre-open hook at 1733534675246Writing region info on filesystem at 1733534675246Initializing all the Stores at 1733534675248 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675248Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675248Cleaning up temporary data from old regions at 1733534675272 (+24 ms)Running coprocessor post-open hooks at 1733534675275 (+3 ms)Region opened successfully at 1733534675275 2024-12-07T01:24:35,276 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., pid=17, masterSystemTime=1733534675238 2024-12-07T01:24:35,278 DEBUG [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:35,278 INFO [RS_OPEN_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
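With the region reopened on ec1863dc21e5,45471 at next sequenceid 18, the client's next lookup (seen just below at 01:24:35,523) resolves to the new location and the read succeeds. An illustrative read of the same row with the standard client API, assuming the test's table and its families cf1/cf2:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterReassignment {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // The stale location was evicted earlier, so this Get transparently
      // re-locates the region on the surviving server before issuing the RPC.
      Get get = new Get(Bytes.toBytes("r1"))
          .addFamily(Bytes.toBytes("cf1"))
          .addFamily(Bytes.toBytes("cf2"));
      Result result = table.get(get);
      System.out.println("cells returned for r1: " + result.size());
    }
  }
}
```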
2024-12-07T01:24:35,279 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=4df18ab8d6a713905826e338f7d67d7c, regionState=OPEN, openSeqNum=18, regionLocation=ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:35,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 because future has completed 2024-12-07T01:24:35,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-07T01:24:35,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 4df18ab8d6a713905826e338f7d67d7c, server=ec1863dc21e5,45471,1733534655575 in 198 msec 2024-12-07T01:24:35,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-07T01:24:35,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4df18ab8d6a713905826e338f7d67d7c, ASSIGN in 359 msec 2024-12-07T01:24:35,288 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server ec1863dc21e5,45349,1733534655753 after splitting done 2024-12-07T01:24:35,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure ec1863dc21e5,45349,1733534655753, splitWal=true, meta=false in 610 msec 2024-12-07T01:24:35,523 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c., hostname=ec1863dc21e5,45471,1733534655575, seqNum=18] 2024-12-07T01:24:35,536 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=402 (was 404), OpenFileDescriptor=1028 (was 999) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 198) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7762 (was 7820) 2024-12-07T01:24:35,538 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1028 is superior to 1024 2024-12-07T01:24:35,549 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=402, OpenFileDescriptor=1028, MaxFileDescriptor=1048576, SystemLoadAverage=206, ProcessCount=11, AvailableMemoryMB=7761 2024-12-07T01:24:35,549 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1028 is superior to 1024 2024-12-07T01:24:35,564 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:35,565 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:35,566 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:35,570 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-54345738, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-54345738, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:35,582 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-54345738/hregion-54345738.1733534675570, exclude list is [], retry=0 2024-12-07T01:24:35,585 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:35,585 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:35,585 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:35,587 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-54345738/hregion-54345738.1733534675570 2024-12-07T01:24:35,589 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:35,589 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 5ff4c3bb8df599257123f6589dc50c49, NAME => 'testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:35,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741893_1071 (size=67) 2024-12-07T01:24:35,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741893_1071 (size=67) 2024-12-07T01:24:35,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741893_1071 (size=67) 2024-12-07T01:24:35,601 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:35,602 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,604 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName a 2024-12-07T01:24:35,604 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,605 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,605 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,607 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName b 2024-12-07T01:24:35,607 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,608 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,608 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,610 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName c 2024-12-07T01:24:35,610 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,611 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,611 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,612 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,612 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,613 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,613 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,614 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:35,615 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,617 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:35,617 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5ff4c3bb8df599257123f6589dc50c49; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73948518, jitterRate=0.1019187867641449}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5ff4c3bb8df599257123f6589dc50c49: Writing region info on filesystem at 1733534675601Initializing all the Stores at 1733534675602 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675602Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675602Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675602Cleaning up temporary data from old regions at 1733534675613 (+11 ms)Region opened successfully at 1733534675618 (+5 ms) 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5ff4c3bb8df599257123f6589dc50c49, disabling compactions & flushes 2024-12-07T01:24:35,618 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 
after waiting 0 ms 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,618 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,618 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5ff4c3bb8df599257123f6589dc50c49: Waiting for close lock at 1733534675618Disabling compacts and flushes for region at 1733534675618Disabling writes for close at 1733534675618Writing region close event to WAL at 1733534675618Closed at 1733534675618 2024-12-07T01:24:35,622 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-54345738/hregion-54345738.1733534675570 not finished, retry = 0 2024-12-07T01:24:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741892_1070 (size=95) 2024-12-07T01:24:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741892_1070 (size=95) 2024-12-07T01:24:35,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741892_1070 (size=95) 2024-12-07T01:24:35,727 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:35,727 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-54345738:(num 1733534675570) 2024-12-07T01:24:35,727 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:35,732 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:35,747 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, exclude list is [], retry=0 2024-12-07T01:24:35,749 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:35,750 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:35,750 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:35,752 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 2024-12-07T01:24:35,752 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:35,752 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ff4c3bb8df599257123f6589dc50c49, NAME => 'testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:35,752 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:35,753 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,753 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,754 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,755 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName a 2024-12-07T01:24:35,755 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,756 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,756 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,756 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName b 2024-12-07T01:24:35,757 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,757 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,757 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,758 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName c 2024-12-07T01:24:35,758 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:35,758 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:35,759 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,759 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,761 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,762 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,762 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,762 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:35,764 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:35,765 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5ff4c3bb8df599257123f6589dc50c49; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64042619, jitterRate=-0.04569061100482941}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:35,765 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5ff4c3bb8df599257123f6589dc50c49: Writing region info on filesystem at 1733534675753Initializing all the Stores at 1733534675754 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675754Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675754Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534675754Cleaning up temporary data from old regions at 1733534675762 (+8 ms)Region opened successfully at 1733534675765 (+3 ms) 2024-12-07T01:24:35,791 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5ff4c3bb8df599257123f6589dc50c49 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-07T01:24:35,808 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/a/b2421817c28a433eafe7a70a4a7d703a is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733534675766/Put/seqid=0 2024-12-07T01:24:35,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741895_1073 (size=5958) 2024-12-07T01:24:35,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741895_1073 (size=5958) 2024-12-07T01:24:35,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741895_1073 (size=5958) 2024-12-07T01:24:35,821 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/a/b2421817c28a433eafe7a70a4a7d703a 2024-12-07T01:24:35,849 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/36c480d65ff74df8b5a309d9125c7d2a is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733534675774/Put/seqid=0 2024-12-07T01:24:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741896_1074 (size=5958) 2024-12-07T01:24:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741896_1074 (size=5958) 2024-12-07T01:24:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741896_1074 (size=5958) 2024-12-07T01:24:35,857 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/36c480d65ff74df8b5a309d9125c7d2a 2024-12-07T01:24:35,881 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/c/1cd3d611e55748039f05b5582d4c633e is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733534675782/Put/seqid=0 2024-12-07T01:24:35,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741897_1075 (size=5958) 2024-12-07T01:24:35,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741897_1075 (size=5958) 2024-12-07T01:24:35,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741897_1075 (size=5958) 2024-12-07T01:24:35,890 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/c/1cd3d611e55748039f05b5582d4c633e 2024-12-07T01:24:35,898 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/a/b2421817c28a433eafe7a70a4a7d703a as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/a/b2421817c28a433eafe7a70a4a7d703a 2024-12-07T01:24:35,905 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/a/b2421817c28a433eafe7a70a4a7d703a, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T01:24:35,906 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/36c480d65ff74df8b5a309d9125c7d2a as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/36c480d65ff74df8b5a309d9125c7d2a 2024-12-07T01:24:35,913 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/36c480d65ff74df8b5a309d9125c7d2a, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T01:24:35,915 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/c/1cd3d611e55748039f05b5582d4c633e as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/c/1cd3d611e55748039f05b5582d4c633e 2024-12-07T01:24:35,922 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/c/1cd3d611e55748039f05b5582d4c633e, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T01:24:35,923 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 5ff4c3bb8df599257123f6589dc50c49 in 132ms, sequenceid=33, compaction requested=false 2024-12-07T01:24:35,923 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5ff4c3bb8df599257123f6589dc50c49: 2024-12-07T01:24:35,924 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5ff4c3bb8df599257123f6589dc50c49, disabling compactions & flushes 2024-12-07T01:24:35,924 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,924 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,924 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. after waiting 0 ms 2024-12-07T01:24:35,924 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 2024-12-07T01:24:35,925 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 
2024-12-07T01:24:35,925 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5ff4c3bb8df599257123f6589dc50c49: Waiting for close lock at 1733534675924Disabling compacts and flushes for region at 1733534675924Disabling writes for close at 1733534675924Writing region close event to WAL at 1733534675925 (+1 ms)Closed at 1733534675925 2024-12-07T01:24:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741894_1072 (size=3386) 2024-12-07T01:24:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741894_1072 (size=3386) 2024-12-07T01:24:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741894_1072 (size=3386) 2024-12-07T01:24:35,933 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/36c480d65ff74df8b5a309d9125c7d2a to hdfs://localhost:42771/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/36c480d65ff74df8b5a309d9125c7d2a 2024-12-07T01:24:35,961 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, size=3.3 K (3386bytes) 2024-12-07T01:24:35,961 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 2024-12-07T01:24:35,961 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 after 0ms 2024-12-07T01:24:35,963 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:35,964 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 took 4ms 2024-12-07T01:24:35,967 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733534675732.temp 2024-12-07T01:24:35,969 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp 2024-12-07T01:24:35,971 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 so closing down 2024-12-07T01:24:35,972 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:35,972 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:35,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40681 is added to blk_1073741898_1076 (size=2944) 2024-12-07T01:24:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741898_1076 (size=2944) 2024-12-07T01:24:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741898_1076 (size=2944) 2024-12-07T01:24:35,984 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:35,986 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 2024-12-07T01:24:35,986 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 22 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, size=3.3 K, length=3386, corrupted=false, cancelled=false 2024-12-07T01:24:35,986 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, journal: Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, size=3.3 K (3386bytes) at 1733534675961Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp at 1733534675969 (+8 ms)Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 so closing down at 1733534675972 (+3 ms)3 split writer threads finished at 1733534675973 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733534675984 (+11 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000003-wal.1733534675732.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 at 1733534675986 (+2 ms)Processed 32 edits across 1 Regions in 22 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732, size=3.3 K, length=3386, corrupted=false, cancelled=false at 1733534675986 2024-12-07T01:24:35,988 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675732 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534675732 2024-12-07T01:24:35,988 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 2024-12-07T01:24:35,988 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:35,990 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:36,009 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675990, exclude list is [], retry=0 2024-12-07T01:24:36,011 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:36,012 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:36,012 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:36,013 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675990 2024-12-07T01:24:36,014 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:36,014 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ff4c3bb8df599257123f6589dc50c49, NAME => 'testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:36,014 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:36,014 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,014 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,015 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,016 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName a 2024-12-07T01:24:36,016 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,025 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/a/b2421817c28a433eafe7a70a4a7d703a 2024-12-07T01:24:36,025 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,025 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,027 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName b 2024-12-07T01:24:36,027 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,027 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,028 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,028 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff4c3bb8df599257123f6589dc50c49 columnFamilyName c 2024-12-07T01:24:36,028 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,034 DEBUG [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/c/1cd3d611e55748039f05b5582d4c633e 2024-12-07T01:24:36,034 INFO [StoreOpener-5ff4c3bb8df599257123f6589dc50c49-1 {}] regionserver.HStore(327): Store=5ff4c3bb8df599257123f6589dc50c49/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,034 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,035 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,036 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,037 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 2024-12-07T01:24:36,039 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:36,040 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 2024-12-07T01:24:36,041 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5ff4c3bb8df599257123f6589dc50c49 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-07T01:24:36,060 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/b603480bff244adbad920a8469b4177e is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733534675774/Put/seqid=0 2024-12-07T01:24:36,066 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741900_1078 (size=5958) 2024-12-07T01:24:36,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741900_1078 (size=5958) 2024-12-07T01:24:36,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741900_1078 (size=5958) 2024-12-07T01:24:36,067 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/b603480bff244adbad920a8469b4177e 2024-12-07T01:24:36,074 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/.tmp/b/b603480bff244adbad920a8469b4177e as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/b603480bff244adbad920a8469b4177e 2024-12-07T01:24:36,080 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/b/b603480bff244adbad920a8469b4177e, entries=10, sequenceid=32, filesize=5.8 K 2024-12-07T01:24:36,081 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 5ff4c3bb8df599257123f6589dc50c49 in 41ms, sequenceid=32, compaction requested=false; wal=null 2024-12-07T01:24:36,082 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/0000000000000000032 2024-12-07T01:24:36,083 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,083 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,083 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-07T01:24:36,085 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5ff4c3bb8df599257123f6589dc50c49 2024-12-07T01:24:36,087 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/5ff4c3bb8df599257123f6589dc50c49/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-07T01:24:36,088 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5ff4c3bb8df599257123f6589dc50c49; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63224933, jitterRate=-0.05787508189678192}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:36,089 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5ff4c3bb8df599257123f6589dc50c49: Writing region info on filesystem at 1733534676014Initializing all the Stores at 1733534676015 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676015Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676015Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676015Obtaining lock to block concurrent updates at 1733534676041 (+26 ms)Preparing flush snapshotting stores in 5ff4c3bb8df599257123f6589dc50c49 at 1733534676041Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733534676041Flushing stores of testReplayEditsWrittenViaHRegion,,1733534675564.5ff4c3bb8df599257123f6589dc50c49. 
at 1733534676041Flushing 5ff4c3bb8df599257123f6589dc50c49/b: creating writer at 1733534676041Flushing 5ff4c3bb8df599257123f6589dc50c49/b: appending metadata at 1733534676060 (+19 ms)Flushing 5ff4c3bb8df599257123f6589dc50c49/b: closing flushed file at 1733534676060Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20656195: reopening flushed file at 1733534676073 (+13 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 5ff4c3bb8df599257123f6589dc50c49 in 41ms, sequenceid=32, compaction requested=false; wal=null at 1733534676081 (+8 ms)Cleaning up temporary data from old regions at 1733534676083 (+2 ms)Region opened successfully at 1733534676089 (+6 ms) 2024-12-07T01:24:36,116 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=409 (was 402) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:40638 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:40514 [Waiting for operation #36] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:34852 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:37386 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:35000 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver 
for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:37214 [Waiting for operation #43] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1102 (was 1028) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 206), ProcessCount=11 (was 11), AvailableMemoryMB=7748 (was 7761) 2024-12-07T01:24:36,116 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1102 is superior to 1024 2024-12-07T01:24:36,134 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=409, OpenFileDescriptor=1102, MaxFileDescriptor=1048576, SystemLoadAverage=206, ProcessCount=11, AvailableMemoryMB=7743 2024-12-07T01:24:36,135 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1102 is superior to 1024 2024-12-07T01:24:36,155 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:36,157 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:36,158 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:36,161 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-03483250, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-03483250, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:36,174 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-03483250/hregion-03483250.1733534676162, exclude list is [], retry=0 2024-12-07T01:24:36,177 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:36,177 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:36,178 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:36,183 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-03483250/hregion-03483250.1733534676162 2024-12-07T01:24:36,184 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:36,184 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 1806f5398d6805a833fc5f3bee22f3d3, NAME => 'testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:36,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741902_1080 (size=68) 2024-12-07T01:24:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741902_1080 (size=68) 2024-12-07T01:24:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741902_1080 (size=68) 2024-12-07T01:24:36,196 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:36,200 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,201 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName a 2024-12-07T01:24:36,201 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,202 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,202 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,204 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName b 2024-12-07T01:24:36,204 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,205 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,205 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,207 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName c 2024-12-07T01:24:36,207 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,208 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,208 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,209 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,209 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,210 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,210 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,211 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:36,211 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,213 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:36,214 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1806f5398d6805a833fc5f3bee22f3d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71908923, jitterRate=0.07152645289897919}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:36,214 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1806f5398d6805a833fc5f3bee22f3d3: Writing region info on filesystem at 1733534676196Initializing all the Stores at 1733534676196Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676196Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676199 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676199Cleaning up temporary data from old regions at 1733534676210 (+11 ms)Region opened successfully at 1733534676214 (+4 ms) 2024-12-07T01:24:36,214 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1806f5398d6805a833fc5f3bee22f3d3, disabling compactions & flushes 2024-12-07T01:24:36,214 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:36,214 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:36,214 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 
after waiting 0 ms 2024-12-07T01:24:36,214 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:36,216 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:36,216 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1806f5398d6805a833fc5f3bee22f3d3: Waiting for close lock at 1733534676214Disabling compacts and flushes for region at 1733534676214Disabling writes for close at 1733534676214Writing region close event to WAL at 1733534676216 (+2 ms)Closed at 1733534676216 2024-12-07T01:24:36,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741901_1079 (size=95) 2024-12-07T01:24:36,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741901_1079 (size=95) 2024-12-07T01:24:36,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741901_1079 (size=95) 2024-12-07T01:24:36,222 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:36,222 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-03483250:(num 1733534676162) 2024-12-07T01:24:36,222 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:36,224 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:36,243 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, exclude list is [], retry=0 2024-12-07T01:24:36,247 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:36,247 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:36,248 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:36,249 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 2024-12-07T01:24:36,250 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:36,330 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 1806f5398d6805a833fc5f3bee22f3d3, NAME => 'testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:36,334 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,334 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:36,334 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,334 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,337 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,338 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName a 2024-12-07T01:24:36,339 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,340 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,340 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,341 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName b 2024-12-07T01:24:36,341 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,342 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,343 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,344 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName c 2024-12-07T01:24:36,344 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:36,345 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:36,345 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,346 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,348 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,349 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,349 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,350 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:36,352 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,353 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1806f5398d6805a833fc5f3bee22f3d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72326246, jitterRate=0.07774505019187927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:36,353 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:36,354 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1806f5398d6805a833fc5f3bee22f3d3: Running coprocessor pre-open hook at 1733534676335Writing region info on filesystem at 1733534676335Initializing all the Stores at 1733534676336 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676336Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676337 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534676337Cleaning up temporary data from old regions at 1733534676349 (+12 ms)Running coprocessor post-open hooks at 1733534676353 (+4 ms)Region opened successfully at 1733534676354 (+1 ms) 2024-12-07T01:24:36,372 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1806f5398d6805a833fc5f3bee22f3d3 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-07T01:24:36,373 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T01:24:37,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741833_1009 (size=1407)
2024-12-07T01:24:37,374 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=1 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:38,015 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-07T01:24:38,377 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=2 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:39,378 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=3 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:40,379 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=4 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:41,380 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=5 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:42,381 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=6 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:43,352 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-07T01:24:43,382 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=7 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:44,383 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=8 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:44,575 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion
2024-12-07T01:24:44,575 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer
2024-12-07T01:24:44,577 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF
2024-12-07T01:24:44,577 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer
2024-12-07T01:24:44,579 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush
2024-12-07T01:24:44,579 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer
2024-12-07T01:24:45,384 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1806f5398d6805a833fc5f3bee22f3d3/a, retrying num=9 java.io.IOException: Simulated exception by tests [stack trace identical to retry num=0 above; omitted]
2024-12-07T01:24:45,385 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1806f5398d6805a833fc5f3bee22f3d3:
2024-12-07T01:24:45,385 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.
2024-12-07T01:24:45,400 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1806f5398d6805a833fc5f3bee22f3d3:
2024-12-07T01:24:45,400 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 2024-12-07T01:24:45,400 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1806f5398d6805a833fc5f3bee22f3d3, disabling compactions & flushes 2024-12-07T01:24:45,400 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.
2024-12-07T01:24:45,400 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:45,400 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. after waiting 0 ms 2024-12-07T01:24:45,400 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:45,401 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:45,401 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 2024-12-07T01:24:45,401 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1806f5398d6805a833fc5f3bee22f3d3: Waiting for close lock at 1733534685400Running coprocessor pre-close hooks at 1733534685400Disabling compacts and flushes for region at 1733534685400Disabling writes for close at 1733534685400Writing region close event to WAL at 1733534685401 (+1 ms)Running coprocessor post-close hooks at 1733534685401Closed at 1733534685401 2024-12-07T01:24:45,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741903_1081 (size=2691) 2024-12-07T01:24:45,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741903_1081 (size=2691) 2024-12-07T01:24:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741903_1081 (size=2691) 2024-12-07T01:24:45,418 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, size=2.6 K (2691bytes) 2024-12-07T01:24:45,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 2024-12-07T01:24:45,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 after 0ms 2024-12-07T01:24:45,421 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:45,421 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 took 3ms 2024-12-07T01:24:45,423 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 so closing down 2024-12-07T01:24:45,423 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 
2024-12-07T01:24:45,424 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733534676225.temp 2024-12-07T01:24:45,425 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp 2024-12-07T01:24:45,425 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741904_1082 (size=2094) 2024-12-07T01:24:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741904_1082 (size=2094) 2024-12-07T01:24:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741904_1082 (size=2094) 2024-12-07T01:24:45,431 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:45,432 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 2024-12-07T01:24:45,432 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 11 ms; skipped=3; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-12-07T01:24:45,432 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, journal: Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, size=2.6 K (2691bytes) at 1733534685418Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 so closing down at 1733534685423 (+5 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp at 1733534685425 (+2 ms)3 split writer threads finished at 1733534685425Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733534685431 (+6 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000004-wal.1733534676225.temp to 
hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 at 1733534685432 (+1 ms)Processed 23 edits across 1 Regions in 11 ms; skipped=3; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1733534685432 2024-12-07T01:24:45,434 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534676225 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534676225 2024-12-07T01:24:45,434 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 2024-12-07T01:24:45,435 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:45,436 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:45,450 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534685437, exclude list is [], retry=0 2024-12-07T01:24:45,453 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:45,454 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:45,454 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:45,455 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534685437 2024-12-07T01:24:45,456 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:45,456 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1806f5398d6805a833fc5f3bee22f3d3, NAME => 'testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:45,457 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,457 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:45,457 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,457 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,458 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,459 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName a 2024-12-07T01:24:45,459 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:45,460 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:45,460 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,461 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName b 2024-12-07T01:24:45,461 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:45,461 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] 
regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:45,461 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,462 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1806f5398d6805a833fc5f3bee22f3d3 columnFamilyName c 2024-12-07T01:24:45,462 DEBUG [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:45,462 INFO [StoreOpener-1806f5398d6805a833fc5f3bee22f3d3-1 {}] regionserver.HStore(327): Store=1806f5398d6805a833fc5f3bee22f3d3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:45,462 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,463 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,464 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,465 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 2024-12-07T01:24:45,467 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:45,468 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 2024-12-07T01:24:45,469 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1806f5398d6805a833fc5f3bee22f3d3 3/3 column families, 
dataSize=1.16 KB heapSize=3.41 KB 2024-12-07T01:24:45,485 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/a/e774f8bcbfc24b288795df7c8b2abdc0 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733534685390/Put/seqid=0 2024-12-07T01:24:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741906_1084 (size=5523) 2024-12-07T01:24:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741906_1084 (size=5523) 2024-12-07T01:24:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741906_1084 (size=5523) 2024-12-07T01:24:45,492 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/a/e774f8bcbfc24b288795df7c8b2abdc0 2024-12-07T01:24:45,514 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/b/c97c15b42b014e15918e5d6581589642 is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733534685385/Put/seqid=0 2024-12-07T01:24:45,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741907_1085 (size=5524) 2024-12-07T01:24:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741907_1085 (size=5524) 2024-12-07T01:24:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741907_1085 (size=5524) 2024-12-07T01:24:45,520 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/b/c97c15b42b014e15918e5d6581589642 2024-12-07T01:24:45,539 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/c/f364a9e2d65349f6aa50d5dda2b79285 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733534685388/Put/seqid=0 2024-12-07T01:24:45,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741908_1086 (size=5457) 2024-12-07T01:24:45,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741908_1086 (size=5457) 2024-12-07T01:24:45,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741908_1086 (size=5457) 2024-12-07T01:24:45,546 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), 
to=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/c/f364a9e2d65349f6aa50d5dda2b79285 2024-12-07T01:24:45,551 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/a/e774f8bcbfc24b288795df7c8b2abdc0 as hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/a/e774f8bcbfc24b288795df7c8b2abdc0 2024-12-07T01:24:45,557 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/a/e774f8bcbfc24b288795df7c8b2abdc0, entries=7, sequenceid=26, filesize=5.4 K 2024-12-07T01:24:45,559 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/b/c97c15b42b014e15918e5d6581589642 as hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/b/c97c15b42b014e15918e5d6581589642 2024-12-07T01:24:45,565 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/b/c97c15b42b014e15918e5d6581589642, entries=7, sequenceid=26, filesize=5.4 K 2024-12-07T01:24:45,566 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/.tmp/c/f364a9e2d65349f6aa50d5dda2b79285 as hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/c/f364a9e2d65349f6aa50d5dda2b79285 2024-12-07T01:24:45,572 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/c/f364a9e2d65349f6aa50d5dda2b79285, entries=6, sequenceid=26, filesize=5.3 K 2024-12-07T01:24:45,572 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 1806f5398d6805a833fc5f3bee22f3d3 in 103ms, sequenceid=26, compaction requested=false; wal=null 2024-12-07T01:24:45,573 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/0000000000000000026 2024-12-07T01:24:45,575 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,575 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,576 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
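Note: the three per-family flush sizes logged above sum exactly to the total in the "Finished flush" line for region 1806f5398d6805a833fc5f3bee22f3d3: 416 B (a) + 417 B (b) + 357 B (c) = 1190 B, i.e. the ~1.16 KB reported. A minimal, purely illustrative Java check of that arithmetic (not part of the test output):

// Illustrative only: the per-family flush sizes reported in the log
// (a=416 B, b=417 B, c=357 B) sum to the 1190 B (~1.16 KB) that the
// "Finished flush" line reports for region 1806f5398d6805a833fc5f3bee22f3d3.
public class FlushSizeCheck {
    public static void main(String[] args) {
        long a = 416, b = 417, c = 357;   // bytes flushed per column family
        long total = a + b + c;           // 1190 bytes
        System.out.printf("total=%d bytes (~%.2f KB)%n", total, total / 1024.0);
    }
}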
2024-12-07T01:24:45,578 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,580 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsAfterAbortingFlush/1806f5398d6805a833fc5f3bee22f3d3/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-07T01:24:45,581 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1806f5398d6805a833fc5f3bee22f3d3; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72616487, jitterRate=0.08206997811794281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:45,581 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1806f5398d6805a833fc5f3bee22f3d3 2024-12-07T01:24:45,582 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1806f5398d6805a833fc5f3bee22f3d3: Running coprocessor pre-open hook at 1733534685457Writing region info on filesystem at 1733534685457Initializing all the Stores at 1733534685458 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534685458Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534685458Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534685458Obtaining lock to block concurrent updates at 1733534685469 (+11 ms)Preparing flush snapshotting stores in 1806f5398d6805a833fc5f3bee22f3d3 at 1733534685469Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733534685469Flushing stores of testReplayEditsAfterAbortingFlush,,1733534676156.1806f5398d6805a833fc5f3bee22f3d3. 
at 1733534685469Flushing 1806f5398d6805a833fc5f3bee22f3d3/a: creating writer at 1733534685469Flushing 1806f5398d6805a833fc5f3bee22f3d3/a: appending metadata at 1733534685485 (+16 ms)Flushing 1806f5398d6805a833fc5f3bee22f3d3/a: closing flushed file at 1733534685485Flushing 1806f5398d6805a833fc5f3bee22f3d3/b: creating writer at 1733534685497 (+12 ms)Flushing 1806f5398d6805a833fc5f3bee22f3d3/b: appending metadata at 1733534685513 (+16 ms)Flushing 1806f5398d6805a833fc5f3bee22f3d3/b: closing flushed file at 1733534685513Flushing 1806f5398d6805a833fc5f3bee22f3d3/c: creating writer at 1733534685526 (+13 ms)Flushing 1806f5398d6805a833fc5f3bee22f3d3/c: appending metadata at 1733534685539 (+13 ms)Flushing 1806f5398d6805a833fc5f3bee22f3d3/c: closing flushed file at 1733534685539Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75f6c45f: reopening flushed file at 1733534685551 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79022009: reopening flushed file at 1733534685558 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cde9e77: reopening flushed file at 1733534685565 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 1806f5398d6805a833fc5f3bee22f3d3 in 103ms, sequenceid=26, compaction requested=false; wal=null at 1733534685572 (+7 ms)Cleaning up temporary data from old regions at 1733534685575 (+3 ms)Running coprocessor post-open hooks at 1733534685581 (+6 ms)Region opened successfully at 1733534685582 (+1 ms) 2024-12-07T01:24:45,599 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=412 (was 409) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:41968 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48624 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:42004 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48606 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44270 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44260 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1164 (was 1102) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 206), ProcessCount=11 (was 11), AvailableMemoryMB=7642 (was 7743) 2024-12-07T01:24:45,600 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1164 is superior to 1024 2024-12-07T01:24:45,611 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=412, OpenFileDescriptor=1164, MaxFileDescriptor=1048576, SystemLoadAverage=206, ProcessCount=11, AvailableMemoryMB=7642 2024-12-07T01:24:45,611 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1164 is superior to 1024 2024-12-07T01:24:45,626 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:45,628 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:45,628 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:45,630 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-44797613, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-44797613, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:45,641 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-44797613/hregion-44797613.1733534685631, exclude list is [], retry=0 2024-12-07T01:24:45,643 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:45,644 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:45,644 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:45,645 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-44797613/hregion-44797613.1733534685631 2024-12-07T01:24:45,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487)] 2024-12-07T01:24:45,646 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => f304e33398d21a2f6ae84b657dc1122f, NAME => 'testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:45,653 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741910_1088 (size=61) 2024-12-07T01:24:45,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741910_1088 (size=61) 2024-12-07T01:24:45,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741910_1088 (size=61) 2024-12-07T01:24:45,654 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:45,655 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,656 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f304e33398d21a2f6ae84b657dc1122f columnFamilyName a 2024-12-07T01:24:45,656 DEBUG [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:45,657 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(327): Store=f304e33398d21a2f6ae84b657dc1122f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:45,657 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,658 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,658 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,658 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,658 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,660 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,662 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:45,663 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f304e33398d21a2f6ae84b657dc1122f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73392282, jitterRate=0.09363022446632385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:45,663 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for f304e33398d21a2f6ae84b657dc1122f: Writing region info on filesystem at 1733534685654Initializing all the Stores at 1733534685655 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534685655Cleaning up temporary data from old regions at 1733534685658 (+3 ms)Region opened successfully at 1733534685663 (+5 ms) 2024-12-07T01:24:45,663 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing f304e33398d21a2f6ae84b657dc1122f, disabling compactions & flushes 2024-12-07T01:24:45,663 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,663 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,663 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. after waiting 0 ms 2024-12-07T01:24:45,663 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,664 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 
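Note: the desiredMaxFileSize values printed when these regions open are numerically consistent with a base max store file size of 64 MB (67108864 bytes) scaled by the logged jitterRate: 67108864 x (1 + 0.08206997811794281) ~ 72616487 and 67108864 x (1 + 0.09363022446632385) ~ 73392282. This is an inference from the logged numbers, not something the log states directly; a small illustrative Java sketch under that assumption:

// Hypothetical check, assuming a 64 MB base max file size (67,108,864 bytes);
// desiredMaxFileSize ~ base * (1 + jitterRate) reproduces the values logged above.
public class SplitSizeJitterCheck {
    public static void main(String[] args) {
        long base = 64L * 1024 * 1024;      // assumed base; not stated in the log
        double[] jitterRates = {
            0.08206997811794281,            // logged alongside desiredMaxFileSize=72616487
            0.09363022446632385             // logged alongside desiredMaxFileSize=73392282
        };
        for (double j : jitterRates) {
            System.out.printf("jitterRate=%s -> ~%d%n", j, Math.round(base * (1 + j)));
        }
    }
}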
2024-12-07T01:24:45,664 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for f304e33398d21a2f6ae84b657dc1122f: Waiting for close lock at 1733534685663Disabling compacts and flushes for region at 1733534685663Disabling writes for close at 1733534685663Writing region close event to WAL at 1733534685664 (+1 ms)Closed at 1733534685664 2024-12-07T01:24:45,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741909_1087 (size=95) 2024-12-07T01:24:45,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741909_1087 (size=95) 2024-12-07T01:24:45,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741909_1087 (size=95) 2024-12-07T01:24:45,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:45,669 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-44797613:(num 1733534685631) 2024-12-07T01:24:45,669 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:45,671 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:45,684 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, exclude list is [], retry=0 2024-12-07T01:24:45,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:45,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:45,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:45,688 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 2024-12-07T01:24:45,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:45,689 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => f304e33398d21a2f6ae84b657dc1122f, NAME => 'testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:45,689 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:45,689 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,689 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,691 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,691 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f304e33398d21a2f6ae84b657dc1122f columnFamilyName a 2024-12-07T01:24:45,691 DEBUG [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:45,692 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(327): Store=f304e33398d21a2f6ae84b657dc1122f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:45,692 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,692 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,694 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,694 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,694 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,696 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:45,697 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f304e33398d21a2f6ae84b657dc1122f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70944752, jitterRate=0.0571591854095459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:45,698 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
f304e33398d21a2f6ae84b657dc1122f: Writing region info on filesystem at 1733534685689Initializing all the Stores at 1733534685690 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534685690Cleaning up temporary data from old regions at 1733534685694 (+4 ms)Region opened successfully at 1733534685698 (+4 ms) 2024-12-07T01:24:45,706 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing f304e33398d21a2f6ae84b657dc1122f, disabling compactions & flushes 2024-12-07T01:24:45,706 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,706 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,706 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. after waiting 0 ms 2024-12-07T01:24:45,706 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,707 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 2024-12-07T01:24:45,707 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 
2024-12-07T01:24:45,707 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for f304e33398d21a2f6ae84b657dc1122f: Waiting for close lock at 1733534685706Disabling compacts and flushes for region at 1733534685706Disabling writes for close at 1733534685706Writing region close event to WAL at 1733534685706Closed at 1733534685707 (+1 ms) 2024-12-07T01:24:45,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741911_1089 (size=1050) 2024-12-07T01:24:45,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741911_1089 (size=1050) 2024-12-07T01:24:45,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741911_1089 (size=1050) 2024-12-07T01:24:45,722 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, size=1.0 K (1050bytes) 2024-12-07T01:24:45,722 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 2024-12-07T01:24:45,723 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 after 1ms 2024-12-07T01:24:45,725 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:45,725 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 took 3ms 2024-12-07T01:24:45,727 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 so closing down 2024-12-07T01:24:45,727 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:45,728 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733534685671.temp 2024-12-07T01:24:45,729 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp 2024-12-07T01:24:45,729 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:45,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741912_1090 (size=1050) 2024-12-07T01:24:45,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741912_1090 (size=1050) 2024-12-07T01:24:45,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741912_1090 (size=1050) 2024-12-07T01:24:45,738 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:45,739 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp to hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 2024-12-07T01:24:45,739 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-12-07T01:24:45,739 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, journal: Splitting hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, size=1.0 K (1050bytes) at 1733534685722Finishing writing output for hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 so closing down at 1733534685727 (+5 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp at 1733534685729 (+2 ms)3 split writer threads finished at 1733534685729Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733534685738 (+9 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000003-wal.1733534685671.temp to hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 at 1733534685739 (+1 ms)Processed 10 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1733534685739 2024-12-07T01:24:45,741 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534685671 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534685671 2024-12-07T01:24:45,742 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 2024-12-07T01:24:45,746 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 
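The split above reads wal.1733534685671, writes its ten edits to a temp file named after the first sequence id in the WAL (0000000000000000003-wal.1733534685671.temp) and renames it to the maximum sequence id it contains (0000000000000000012). The sketch below only illustrates that naming convention and the seqid comparison in plain Java; the real logic lives in WALSplitter/HRegion, and the directory listing and flushed-seqid value used here are hypothetical.

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class RecoveredEditsNaming {

      // The leading digits of a recovered-edits file name encode a WAL sequence id; the
      // in-progress split output above ("0000000000000000003-wal.1733534685671.temp")
      // is named after the first sequence id and renamed to the max sequence id on close.
      static long seqIdOf(String fileName) {
        return Long.parseLong(fileName.split("-", 2)[0]);
      }

      public static void main(String[] args) {
        long maxFlushedSeqId = 2; // assumed: edits at or below this are already in HFiles

        // Hypothetical recovered.edits listing; only the "...012" file appears in the log above.
        List<String> recoveredEdits = Stream.of("0000000000000000002", "0000000000000000012")
            .sorted(Comparator.comparingLong(RecoveredEditsNaming::seqIdOf))
            .collect(Collectors.toList());

        // A file is only worth replaying if its encoded (maximum) sequence id
        // is above what has already been flushed for the region.
        recoveredEdits.stream()
            .filter(f -> seqIdOf(f) > maxFlushedSeqId)
            .forEach(f -> System.out.println("replay " + f));
      }
    }
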
2024-12-07T01:24:46,068 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:46,070 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:46,081 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534686070, exclude list is [], retry=0 2024-12-07T01:24:46,083 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:46,084 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:46,084 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:46,086 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534686070 2024-12-07T01:24:46,086 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:46,086 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => f304e33398d21a2f6ae84b657dc1122f, NAME => 'testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:46,086 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:46,086 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,086 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,090 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,091 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f304e33398d21a2f6ae84b657dc1122f columnFamilyName a 2024-12-07T01:24:46,091 DEBUG [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,092 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(327): Store=f304e33398d21a2f6ae84b657dc1122f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,092 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,093 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,094 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,094 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 2024-12-07T01:24:46,096 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:46,097 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 2024-12-07T01:24:46,097 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f304e33398d21a2f6ae84b657dc1122f 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-07T01:24:46,115 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/.tmp/a/78f4ef12c0b945eb999709e7f4b05640 is 79, key is testDatalossWhenInputError/a:x0/1733534685698/Put/seqid=0 2024-12-07T01:24:46,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741914_1092 (size=5808) 2024-12-07T01:24:46,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741914_1092 (size=5808) 2024-12-07T01:24:46,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741914_1092 (size=5808) 2024-12-07T01:24:46,122 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 
(bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/.tmp/a/78f4ef12c0b945eb999709e7f4b05640 2024-12-07T01:24:46,134 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/.tmp/a/78f4ef12c0b945eb999709e7f4b05640 as hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/a/78f4ef12c0b945eb999709e7f4b05640 2024-12-07T01:24:46,144 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/a/78f4ef12c0b945eb999709e7f4b05640, entries=10, sequenceid=12, filesize=5.7 K 2024-12-07T01:24:46,144 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for f304e33398d21a2f6ae84b657dc1122f in 47ms, sequenceid=12, compaction requested=false; wal=null 2024-12-07T01:24:46,146 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/0000000000000000012 2024-12-07T01:24:46,147 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,148 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,153 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,156 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-07T01:24:46,157 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f304e33398d21a2f6ae84b657dc1122f; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70048956, jitterRate=0.04381078481674194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:46,158 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for f304e33398d21a2f6ae84b657dc1122f: Writing region info on filesystem at 1733534686086Initializing all the Stores at 1733534686089 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686089Obtaining lock to block concurrent updates at 1733534686097 (+8 ms)Preparing flush snapshotting stores in f304e33398d21a2f6ae84b657dc1122f at 1733534686097Finished memstore snapshotting testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733534686098 (+1 ms)Flushing stores of testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f. 
at 1733534686098Flushing f304e33398d21a2f6ae84b657dc1122f/a: creating writer at 1733534686098Flushing f304e33398d21a2f6ae84b657dc1122f/a: appending metadata at 1733534686115 (+17 ms)Flushing f304e33398d21a2f6ae84b657dc1122f/a: closing flushed file at 1733534686115Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34240c52: reopening flushed file at 1733534686132 (+17 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for f304e33398d21a2f6ae84b657dc1122f in 47ms, sequenceid=12, compaction requested=false; wal=null at 1733534686144 (+12 ms)Cleaning up temporary data from old regions at 1733534686148 (+4 ms)Region opened successfully at 1733534686158 (+10 ms) 2024-12-07T01:24:46,162 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => f304e33398d21a2f6ae84b657dc1122f, NAME => 'testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:46,162 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733534685626.f304e33398d21a2f6ae84b657dc1122f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:46,162 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,162 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,163 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,164 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f304e33398d21a2f6ae84b657dc1122f columnFamilyName a 2024-12-07T01:24:46,165 DEBUG [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,170 DEBUG [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/a/78f4ef12c0b945eb999709e7f4b05640 2024-12-07T01:24:46,171 INFO [StoreOpener-f304e33398d21a2f6ae84b657dc1122f-1 {}] regionserver.HStore(327): Store=f304e33398d21a2f6ae84b657dc1122f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,171 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,172 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,173 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,174 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,174 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,176 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f304e33398d21a2f6ae84b657dc1122f 2024-12-07T01:24:46,178 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testDatalossWhenInputError/f304e33398d21a2f6ae84b657dc1122f/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-07T01:24:46,179 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f304e33398d21a2f6ae84b657dc1122f; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60529568, jitterRate=-0.09803915023803711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T01:24:46,179 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for f304e33398d21a2f6ae84b657dc1122f: Writing region info on filesystem at 1733534686162Initializing all the Stores at 1733534686163 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686163Cleaning up temporary data from old regions at 1733534686174 (+11 ms)Region opened successfully at 1733534686179 (+5 ms) 2024-12-07T01:24:46,195 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=422 (was 412) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:42004 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48606 [Waiting for operation #17] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44270 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48708 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:42052 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44332 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1246 (was 1164) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 206), ProcessCount=11 (was 11), AvailableMemoryMB=7625 (was 7642) 2024-12-07T01:24:46,196 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1246 is superior to 1024 2024-12-07T01:24:46,207 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=422, OpenFileDescriptor=1246, MaxFileDescriptor=1048576, SystemLoadAverage=206, ProcessCount=11, AvailableMemoryMB=7624 2024-12-07T01:24:46,207 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1246 is superior to 1024 2024-12-07T01:24:46,221 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:46,224 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:46,224 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:46,227 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-98041560, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-98041560, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:46,238 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-98041560/hregion-98041560.1733534686227, exclude list is [], retry=0 2024-12-07T01:24:46,241 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:46,241 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:46,241 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:46,243 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-98041560/hregion-98041560.1733534686227 2024-12-07T01:24:46,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:46,244 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => e8b4a6e1b87ea15d743cb3576bc1db51, NAME => 'testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741916_1094 (size=63) 2024-12-07T01:24:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741916_1094 (size=63) 2024-12-07T01:24:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741916_1094 (size=63) 2024-12-07T01:24:46,253 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:46,254 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,255 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName a 2024-12-07T01:24:46,255 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,255 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,255 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,256 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName b 2024-12-07T01:24:46,256 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,257 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,257 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,258 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName c 2024-12-07T01:24:46,258 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,258 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,258 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,259 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,259 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,260 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,260 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,261 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:46,262 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,266 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:46,267 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e8b4a6e1b87ea15d743cb3576bc1db51; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66253168, jitterRate=-0.012750864028930664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:46,268 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e8b4a6e1b87ea15d743cb3576bc1db51: Writing region info on filesystem at 1733534686253Initializing all the Stores at 1733534686253Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686253Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686254 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686254Cleaning up temporary data from old regions at 1733534686260 (+6 ms)Region opened successfully at 1733534686268 (+8 ms) 2024-12-07T01:24:46,268 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e8b4a6e1b87ea15d743cb3576bc1db51, disabling compactions & flushes 2024-12-07T01:24:46,268 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:46,268 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:46,268 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 
after waiting 0 ms 2024-12-07T01:24:46,268 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:46,269 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:46,269 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e8b4a6e1b87ea15d743cb3576bc1db51: Waiting for close lock at 1733534686268Disabling compacts and flushes for region at 1733534686268Disabling writes for close at 1733534686268Writing region close event to WAL at 1733534686269 (+1 ms)Closed at 1733534686269 2024-12-07T01:24:46,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741915_1093 (size=95) 2024-12-07T01:24:46,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741915_1093 (size=95) 2024-12-07T01:24:46,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741915_1093 (size=95) 2024-12-07T01:24:46,276 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:46,276 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-98041560:(num 1733534686227) 2024-12-07T01:24:46,276 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:46,278 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:46,291 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, exclude list is [], retry=0 2024-12-07T01:24:46,294 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:46,294 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:46,295 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:46,297 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 2024-12-07T01:24:46,298 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:46,298 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => e8b4a6e1b87ea15d743cb3576bc1db51, NAME => 'testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:46,298 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:46,298 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,298 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,300 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,301 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName a 2024-12-07T01:24:46,301 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,301 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,301 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,302 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName b 2024-12-07T01:24:46,302 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,303 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,303 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,304 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName c 2024-12-07T01:24:46,304 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:46,304 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:46,304 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,305 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,305 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,306 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,306 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,307 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
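The FlushLargeStoresPolicy record above derives its per-family lower bound from the region memstore flush size divided by the number of column families: assuming the default 128 MB hbase.hregion.memstore.flush.size (not shown in this log) and the three families a, b and c, that gives the 44739242-byte (~42.7 MB) flushSizeLowerBound reported when the region opens. A one-line check of that arithmetic:

    public class FlushSizeLowerBound {
      public static void main(String[] args) {
        // Assumed default hbase.hregion.memstore.flush.size of 128 MB (not visible in this log).
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes
        int columnFamilies = 3;                      // 'a', 'b' and 'c' in testCompactedBulkLoadedFiles

        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);              // 44739242 bytes, the ~42.7 MB reported above
      }
    }
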
2024-12-07T01:24:46,308 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:46,309 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e8b4a6e1b87ea15d743cb3576bc1db51; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66490442, jitterRate=-0.009215205907821655}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:46,309 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e8b4a6e1b87ea15d743cb3576bc1db51: Writing region info on filesystem at 1733534686298Initializing all the Stores at 1733534686299 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686299Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686300 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534686300Cleaning up temporary data from old regions at 1733534686306 (+6 ms)Region opened successfully at 1733534686309 (+3 ms) 2024-12-07T01:24:46,313 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733534686312/Put/seqid=0 2024-12-07T01:24:46,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741918_1096 (size=4875) 2024-12-07T01:24:46,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741918_1096 (size=4875) 2024-12-07T01:24:46,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741918_1096 (size=4875) 2024-12-07T01:24:46,325 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733534686325/Put/seqid=0 2024-12-07T01:24:46,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741919_1097 (size=4875) 2024-12-07T01:24:46,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741919_1097 (size=4875) 2024-12-07T01:24:46,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741919_1097 (size=4875) 2024-12-07T01:24:46,338 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len 
of the biggest cell in hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733534686337/Put/seqid=0 2024-12-07T01:24:46,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741920_1098 (size=4875) 2024-12-07T01:24:46,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741920_1098 (size=4875) 2024-12-07T01:24:46,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741920_1098 (size=4875) 2024-12-07T01:24:46,344 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,349 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-07T01:24:46,349 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T01:24:46,349 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,352 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-07T01:24:46,352 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T01:24:46,352 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,356 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-07T01:24:46,356 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T01:24:46,356 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e8b4a6e1b87ea15d743cb3576bc1db51 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-07T01:24:46,370 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp/a/b155bc46edc74912851f452974c5d82b is 55, key is testCompactedBulkLoadedFiles/a:a/1733534686309/Put/seqid=0 2024-12-07T01:24:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741921_1099 (size=5107) 2024-12-07T01:24:46,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741921_1099 (size=5107) 2024-12-07T01:24:46,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741921_1099 (size=5107) 2024-12-07T01:24:46,376 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp/a/b155bc46edc74912851f452974c5d82b 2024-12-07T01:24:46,381 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp/a/b155bc46edc74912851f452974c5d82b as 
hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b 2024-12-07T01:24:46,386 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b, entries=1, sequenceid=4, filesize=5.0 K 2024-12-07T01:24:46,387 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for e8b4a6e1b87ea15d743cb3576bc1db51 in 31ms, sequenceid=4, compaction requested=false 2024-12-07T01:24:46,387 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e8b4a6e1b87ea15d743cb3576bc1db51: 2024-12-07T01:24:46,389 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ 2024-12-07T01:24:46,390 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ 2024-12-07T01:24:46,391 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ 2024-12-07T01:24:46,392 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile0 into e8b4a6e1b87ea15d743cb3576bc1db51/a as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ - updating store file list. 2024-12-07T01:24:46,397 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8d848baaa27c4af3a8e1e71924af003d_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:46,397 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ into e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,397 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile0 into e8b4a6e1b87ea15d743cb3576bc1db51/a (new location: hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_) 2024-12-07T01:24:46,398 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile1 into e8b4a6e1b87ea15d743cb3576bc1db51/a as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ - updating store file list. 
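The Validating/Committing/Loaded HFile lines above show the test driving the bulk load through HStore internals directly. In a client application the same effect is normally obtained through the public bulk-load tool; a minimal sketch, assuming an HBase 2.x client where org.apache.hadoop.hbase.tool.BulkLoadHFiles is available and the prepared HFiles have been staged under a family-named subdirectory (the /staging path here is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Staging layout expected by the tool: <dir>/<family>/<hfile>,
        // e.g. /staging/a/hfile0, /staging/a/hfile1, /staging/a/hfile2 (hypothetical paths).
        Path staged = new Path("/staging");
        // Asks the cluster to adopt the staged HFiles into the table's stores,
        // producing "Loaded HFile ... into .../a" events like the ones in this log.
        BulkLoadHFiles.create(conf)
            .bulkLoad(TableName.valueOf("testCompactedBulkLoadedFiles"), staged);
      }
    }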
2024-12-07T01:24:46,403 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 420d09da97c4499785a9883b1a34128f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:46,403 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ into e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,403 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile1 into e8b4a6e1b87ea15d743cb3576bc1db51/a (new location: hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_) 2024-12-07T01:24:46,404 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile2 into e8b4a6e1b87ea15d743cb3576bc1db51/a as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ - updating store file list. 2024-12-07T01:24:46,409 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:46,409 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ into e8b4a6e1b87ea15d743cb3576bc1db51/a 2024-12-07T01:24:46,409 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42771/hbase/testCompactedBulkLoadedFiles/hfile2 into e8b4a6e1b87ea15d743cb3576bc1db51/a (new location: hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_) 2024-12-07T01:24:46,416 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T01:24:46,417 DEBUG [Time-limited test {}] regionserver.HStore(1541): e8b4a6e1b87ea15d743cb3576bc1db51/a is initiating major compaction (all files) 2024-12-07T01:24:46,417 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of e8b4a6e1b87ea15d743cb3576bc1db51/a in testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 
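The major compaction above is requested on the HStore directly by the test. Against a running cluster, a major compaction of family 'a' would ordinarily be requested through the Admin API instead; a minimal sketch, assuming the standard HBase 2.x client and a reachable cluster (connection settings omitted):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asynchronously requests a major compaction of column family 'a'; the
          // server-side selection then considers all eligible store files, as in
          // the "Selecting compaction from 4 store files ... 4 eligible" line above.
          admin.majorCompact(TableName.valueOf("testCompactedBulkLoadedFiles"), Bytes.toBytes("a"));
        }
      }
    }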
2024-12-07T01:24:46,417 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_] into tmpdir=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp, totalSize=19.3 K 2024-12-07T01:24:46,417 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b155bc46edc74912851f452974c5d82b, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733534686309 2024-12-07T01:24:46,418 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8d848baaa27c4af3a8e1e71924af003d_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T01:24:46,418 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 420d09da97c4499785a9883b1a34128f_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T01:24:46,419 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T01:24:46,432 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp/a/07346a0c74634415aea4269425e73cac is 55, key is testCompactedBulkLoadedFiles/a:a/1733534686309/Put/seqid=0 2024-12-07T01:24:46,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741922_1100 (size=6154) 2024-12-07T01:24:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741922_1100 (size=6154) 2024-12-07T01:24:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741922_1100 (size=6154) 2024-12-07T01:24:46,446 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/.tmp/a/07346a0c74634415aea4269425e73cac as hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/07346a0c74634415aea4269425e73cac 2024-12-07T01:24:46,451 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in e8b4a6e1b87ea15d743cb3576bc1db51/a of e8b4a6e1b87ea15d743cb3576bc1db51 into 07346a0c74634415aea4269425e73cac(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
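The 19.3 K input total reported for this compaction is consistent with the four files listed above: the flushed file b155bc46edc74912851f452974c5d82b is 5107 bytes (blk_1073741921) and each bulk-loaded HFile is 4875 bytes, so 5107 + 3 x 4875 = 19732 bytes ≈ 19.3 KB; the single 6154-byte output block (blk_1073741922) is the 6.0 K file 07346a0c74634415aea4269425e73cac that the store ends up with.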
2024-12-07T01:24:46,451 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for e8b4a6e1b87ea15d743cb3576bc1db51: 2024-12-07T01:24:46,451 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-07T01:24:46,452 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-07T01:24:46,479 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, size=0 (0bytes) 2024-12-07T01:24:46,479 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 might be still open, length is 0 2024-12-07T01:24:46,479 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 2024-12-07T01:24:46,480 WARN [IPC Server handler 2 on default port 42771 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095 2024-12-07T01:24:46,480 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 after 1ms 2024-12-07T01:24:49,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48748 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:40681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48748 dst: /127.0.0.1:40681 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40681 remote=/127.0.0.1:48748]. Total timeout mills is 60000, 57204 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:49,248 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44370 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:38509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44370 dst: /127.0.0.1:38509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:49,248 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:42080 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42080 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:49,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741917_1101 (size=1168) 2024-12-07T01:24:49,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741917_1101 (size=1168) 2024-12-07T01:24:50,482 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 after 4002ms 2024-12-07T01:24:50,485 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:50,485 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 took 4006ms 2024-12-07T01:24:50,487 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278; continuing. 
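The four-second gap between "Failed to recover lease, attempt=0" and "Recovered lease, attempt=1" below is the usual pattern when the WAL writer still holds the HDFS lease: the splitter asks the NameNode to start lease recovery and then polls until the file is closed, while the DataNodes abort the in-flight write pipeline (the DataXceiver errors above). A minimal sketch of that retry loop, assuming a DistributedFileSystem handle and a hypothetical path; the production logic lives in HBase's RecoverLeaseFSUtils:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("hdfs://localhost:42771/hbase/WALs/example/wal.0"); // hypothetical path
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // recoverLease() triggers NameNode-side lease recovery and returns true
          // once the file has been closed; poll until that happens.
          while (!dfs.recoverLease(wal)) {
            Thread.sleep(1000L);
          }
        }
      }
    }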
2024-12-07T01:24:50,487 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 so closing down 2024-12-07T01:24:50,487 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:50,488 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733534686278.temp 2024-12-07T01:24:50,490 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp 2024-12-07T01:24:50,490 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741923_1102 (size=547) 2024-12-07T01:24:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741923_1102 (size=547) 2024-12-07T01:24:50,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741923_1102 (size=547) 2024-12-07T01:24:50,496 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:50,498 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp to hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 2024-12-07T01:24:50,498 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 12 ms; skipped=3; WAL=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T01:24:50,498 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, journal: Splitting hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, size=0 (0bytes) at 1733534686479Finishing writing output for hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 so closing down at 1733534690487 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp at 1733534690490 (+3 ms)3 split writer threads finished at 1733534690490Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp (wrote 2 edits, skipped 0 
edits in 0 ms) at 1733534690496 (+6 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000003-wal.1733534686278.temp to hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 at 1733534690498 (+2 ms)Processed 5 edits across 1 Regions in 12 ms; skipped=3; WAL=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278, size=0, length=0, corrupted=false, cancelled=false at 1733534690498 2024-12-07T01:24:50,499 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534686278 2024-12-07T01:24:50,500 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 2024-12-07T01:24:50,501 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:50,503 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:50,535 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534690503, exclude list is [], retry=0 2024-12-07T01:24:50,538 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:50,538 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:50,538 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:50,540 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534690503 2024-12-07T01:24:50,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:50,540 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e8b4a6e1b87ea15d743cb3576bc1db51, NAME => 'testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:50,541 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:50,541 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,541 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,543 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,544 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName a 2024-12-07T01:24:50,544 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,553 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/07346a0c74634415aea4269425e73cac 2024-12-07T01:24:50,557 DEBUG [StoreFileOpener-e8b4a6e1b87ea15d743cb3576bc1db51-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:50,557 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ 2024-12-07T01:24:50,561 DEBUG [StoreFileOpener-e8b4a6e1b87ea15d743cb3576bc1db51-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 420d09da97c4499785a9883b1a34128f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:50,561 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ 2024-12-07T01:24:50,565 DEBUG [StoreFileOpener-e8b4a6e1b87ea15d743cb3576bc1db51-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8d848baaa27c4af3a8e1e71924af003d_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T01:24:50,565 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ 
2024-12-07T01:24:50,569 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b 2024-12-07T01:24:50,569 WARN [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@132bbd28 2024-12-07T01:24:50,569 WARN [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@132bbd28 2024-12-07T01:24:50,569 WARN [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@132bbd28 2024-12-07T01:24:50,569 WARN [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@132bbd28 2024-12-07T01:24:50,569 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b] to archive 2024-12-07T01:24:50,570 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
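After the compacted inputs are cleared from the store engine, the lines that follow show them being moved under /hbase/archive, leaving only the compacted output in the store. One way to confirm that end state from a client, assuming direct HDFS access to the paths shown in this log, is to list the store and archive directories; this is a sketch, not part of the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String region = "e8b4a6e1b87ea15d743cb3576bc1db51";
        Path store = new Path("hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/" + region + "/a");
        Path archive = new Path("hdfs://localhost:42771/hbase/archive/data/default/testCompactedBulkLoadedFiles/" + region + "/a");
        FileSystem fs = store.getFileSystem(conf);
        // Expect only the compacted output (07346a0c...) to remain in the store,
        // with the flush file and the three bulk-loaded HFiles under the archive.
        for (FileStatus s : fs.listStatus(store)) {
          System.out.println("store:   " + s.getPath().getName());
        }
        for (FileStatus s : fs.listStatus(archive)) {
          System.out.println("archive: " + s.getPath().getName());
        }
      }
    }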
2024-12-07T01:24:50,572 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ to hdfs://localhost:42771/hbase/archive/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_ 2024-12-07T01:24:50,573 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ to hdfs://localhost:42771/hbase/archive/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/420d09da97c4499785a9883b1a34128f_SeqId_4_ 2024-12-07T01:24:50,574 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ to hdfs://localhost:42771/hbase/archive/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/8d848baaa27c4af3a8e1e71924af003d_SeqId_4_ 2024-12-07T01:24:50,574 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b to hdfs://localhost:42771/hbase/archive/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/a/b155bc46edc74912851f452974c5d82b 2024-12-07T01:24:50,574 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,575 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,575 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName b 2024-12-07T01:24:50,575 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,576 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] 
regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,576 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,576 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8b4a6e1b87ea15d743cb3576bc1db51 columnFamilyName c 2024-12-07T01:24:50,576 DEBUG [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,577 INFO [StoreOpener-e8b4a6e1b87ea15d743cb3576bc1db51-1 {}] regionserver.HStore(327): Store=e8b4a6e1b87ea15d743cb3576bc1db51/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,577 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,578 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,579 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,579 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 2024-12-07T01:24:50,581 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:50,583 DEBUG [Time-limited test {}] regionserver.HRegion(5836): e8b4a6e1b87ea15d743cb3576bc1db51 : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "e8b4a6e1b87ea15d743cb3576bc1db51" family_name: "a" compaction_input: "b155bc46edc74912851f452974c5d82b" compaction_input: "8d848baaa27c4af3a8e1e71924af003d_SeqId_4_" compaction_input: "420d09da97c4499785a9883b1a34128f_SeqId_4_" compaction_input: 
"077a2c909cc549bcbc7cd43bed9beb54_SeqId_4_" compaction_output: "07346a0c74634415aea4269425e73cac" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-07T01:24:50,583 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-07T01:24:50,583 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 2024-12-07T01:24:50,584 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/0000000000000000008 2024-12-07T01:24:50,585 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,585 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,585 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:50,587 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e8b4a6e1b87ea15d743cb3576bc1db51 2024-12-07T01:24:50,590 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testCompactedBulkLoadedFiles/e8b4a6e1b87ea15d743cb3576bc1db51/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T01:24:50,591 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e8b4a6e1b87ea15d743cb3576bc1db51; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74793119, jitterRate=0.11450432240962982}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:50,591 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e8b4a6e1b87ea15d743cb3576bc1db51: Writing region info on filesystem at 1733534690541Initializing all the Stores at 1733534690542 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690542Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690543 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1733534690543Cleaning up temporary data from old regions at 1733534690585 (+42 ms)Region opened successfully at 1733534690591 (+6 ms) 2024-12-07T01:24:50,593 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e8b4a6e1b87ea15d743cb3576bc1db51, disabling compactions & flushes 2024-12-07T01:24:50,593 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:50,593 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:50,593 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. after waiting 0 ms 2024-12-07T01:24:50,593 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:50,594 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733534686222.e8b4a6e1b87ea15d743cb3576bc1db51. 2024-12-07T01:24:50,594 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e8b4a6e1b87ea15d743cb3576bc1db51: Waiting for close lock at 1733534690593Disabling compacts and flushes for region at 1733534690593Disabling writes for close at 1733534690593Writing region close event to WAL at 1733534690594 (+1 ms)Closed at 1733534690594 2024-12-07T01:24:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741924_1103 (size=95) 2024-12-07T01:24:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741924_1103 (size=95) 2024-12-07T01:24:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741924_1103 (size=95) 2024-12-07T01:24:50,598 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:50,598 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733534690503) 2024-12-07T01:24:50,611 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=435 (was 422) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42771 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_559434587_22 at /127.0.0.1:42150 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:42771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_559434587_22 at /127.0.0.1:44446 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_559434587_22 at /127.0.0.1:48814 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1332 (was 1246) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=221 (was 206) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7577 (was 7624) 2024-12-07T01:24:50,612 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1332 is superior to 1024 2024-12-07T01:24:50,623 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=435, OpenFileDescriptor=1332, MaxFileDescriptor=1048576, SystemLoadAverage=221, ProcessCount=11, AvailableMemoryMB=7576 2024-12-07T01:24:50,623 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1332 is superior to 1024 2024-12-07T01:24:50,635 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:50,637 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T01:24:50,637 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T01:24:50,639 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-39244464, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/hregion-39244464, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:50,651 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-39244464/hregion-39244464.1733534690639, exclude list is [], retry=0 2024-12-07T01:24:50,653 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:50,653 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:50,654 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:50,655 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-39244464/hregion-39244464.1733534690639 2024-12-07T01:24:50,656 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597),(127.0.0.1/127.0.0.1:36997:36997)] 2024-12-07T01:24:50,656 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => eddea5b5796ff18b182d3d9754a0def0, NAME => 'testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42771/hbase 2024-12-07T01:24:50,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741926_1105 (size=67) 2024-12-07T01:24:50,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741926_1105 (size=67) 2024-12-07T01:24:50,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741926_1105 (size=67) 2024-12-07T01:24:50,667 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:50,668 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,669 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName a 2024-12-07T01:24:50,669 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,669 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,670 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,671 INFO 
[StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName b 2024-12-07T01:24:50,671 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,671 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,671 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,672 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName c 2024-12-07T01:24:50,672 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,673 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,673 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,673 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,674 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,674 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,674 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,675 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:50,676 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,678 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T01:24:50,678 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eddea5b5796ff18b182d3d9754a0def0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59375936, jitterRate=-0.11522960662841797}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:50,678 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eddea5b5796ff18b182d3d9754a0def0: Writing region info on filesystem at 1733534690667Initializing all the Stores at 1733534690667Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690667Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690668 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690668Cleaning up temporary data from old regions at 1733534690675 (+7 ms)Region opened successfully at 1733534690678 (+3 ms) 2024-12-07T01:24:50,679 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing eddea5b5796ff18b182d3d9754a0def0, disabling compactions & flushes 2024-12-07T01:24:50,679 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,679 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 
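The entries above show the test creating region eddea5b5796ff18b182d3d9754a0def0 with three single-version column families (a, b, c), then opening and immediately closing it. For orientation only, a minimal sketch of how an equivalent table descriptor could be built with the standard HBase 2.x client API; the test itself constructs its descriptor through utility code that does not appear in this log, so treat the class below as illustrative, not the test's code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorSketch {
  // Mirrors the logged descriptor: families a, b, c with VERSIONS => '1' and BLOCKSIZE => 64 KB.
  public static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
    for (String family : new String[] { "a", "b", "c" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .setBlocksize(64 * 1024)
          .build();
      builder.setColumnFamily(cf);
    }
    return builder.build();
  }
}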
2024-12-07T01:24:50,679 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. after waiting 0 ms 2024-12-07T01:24:50,679 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,679 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,679 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for eddea5b5796ff18b182d3d9754a0def0: Waiting for close lock at 1733534690679Disabling compacts and flushes for region at 1733534690679Disabling writes for close at 1733534690679Writing region close event to WAL at 1733534690679Closed at 1733534690679 2024-12-07T01:24:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741925_1104 (size=95) 2024-12-07T01:24:50,683 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-39244464/hregion-39244464.1733534690639 not finished, retry = 0 2024-12-07T01:24:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741925_1104 (size=95) 2024-12-07T01:24:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741925_1104 (size=95) 2024-12-07T01:24:50,788 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:50,788 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-39244464:(num 1733534690639) 2024-12-07T01:24:50,788 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:50,792 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:50,805 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, exclude list is [], retry=0 2024-12-07T01:24:50,807 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:50,808 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:50,808 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:50,809 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL 
/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 2024-12-07T01:24:50,810 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:50,810 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => eddea5b5796ff18b182d3d9754a0def0, NAME => 'testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:50,810 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:50,810 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,810 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,811 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,812 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName a 2024-12-07T01:24:50,812 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,813 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,813 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,813 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName b 2024-12-07T01:24:50,814 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,814 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,814 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,815 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName c 2024-12-07T01:24:50,815 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,815 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,815 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,816 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,817 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,818 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,818 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for eddea5b5796ff18b182d3d9754a0def0 
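At this point the region has been reopened against a fresh WAL and HRegion reports "Found 0 recovered edits file(s)" under the region directory. As a small, self-contained illustration (plain Hadoop FileSystem API, not the test's own code) of how the recovered.edits directory referenced in these messages could be inspected; the HDFS URI and encoded region name below are copied from the log and are specific to this run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEdits {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Region directory as logged above; host, port and encoded region name vary per run.
    Path regionDir = new Path("hdfs://localhost:42771/hbase/data/default/"
        + "testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0");
    Path recoveredEdits = new Path(regionDir, "recovered.edits");
    FileSystem fs = regionDir.getFileSystem(conf);
    if (fs.exists(recoveredEdits)) {
      for (FileStatus f : fs.listStatus(recoveredEdits)) {
        // Each entry is either an NNN.seqid marker or a split WAL segment awaiting replay.
        System.out.println(f.getPath() + " len=" + f.getLen());
      }
    } else {
      System.out.println("No recovered.edits under " + regionDir);
    }
  }
}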
2024-12-07T01:24:50,818 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:50,819 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,820 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eddea5b5796ff18b182d3d9754a0def0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73745296, jitterRate=0.09889054298400879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:50,820 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eddea5b5796ff18b182d3d9754a0def0: Writing region info on filesystem at 1733534690810Initializing all the Stores at 1733534690811 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690811Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690811Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690811Cleaning up temporary data from old regions at 1733534690818 (+7 ms)Region opened successfully at 1733534690820 (+2 ms) 2024-12-07T01:24:50,827 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing eddea5b5796ff18b182d3d9754a0def0 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-07T01:24:50,841 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/52959e14a1154de2b42aa6cab8485222 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733534690820/Put/seqid=0 2024-12-07T01:24:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741928_1107 (size=5958) 2024-12-07T01:24:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741928_1107 (size=5958) 2024-12-07T01:24:50,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741928_1107 (size=5958) 2024-12-07T01:24:50,848 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), 
to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/52959e14a1154de2b42aa6cab8485222 2024-12-07T01:24:50,853 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/52959e14a1154de2b42aa6cab8485222 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/52959e14a1154de2b42aa6cab8485222 2024-12-07T01:24:50,857 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/52959e14a1154de2b42aa6cab8485222, entries=10, sequenceid=13, filesize=5.8 K 2024-12-07T01:24:50,859 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for eddea5b5796ff18b182d3d9754a0def0 in 31ms, sequenceid=13, compaction requested=false 2024-12-07T01:24:50,859 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for eddea5b5796ff18b182d3d9754a0def0: 2024-12-07T01:24:50,876 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing eddea5b5796ff18b182d3d9754a0def0, disabling compactions & flushes 2024-12-07T01:24:50,876 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,876 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,876 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. after waiting 0 ms 2024-12-07T01:24:50,876 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,877 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:50,877 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 
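The flush above writes the "a" family memstore to a temporary HFile and commits it into the store; the region is then closed with 1740 bytes still unflushed (the ERROR line), which the later WAL split and replay recover. In the test this flush is invoked directly on the HRegion; from a client, the comparable operation would go through the Admin API, sketched below with standard HBase client calls (connection settings are assumed, not taken from this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces every region of the table to write its memstores out as HFiles,
      // comparable to the direct HRegion flush recorded in the log.
      admin.flush(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
    }
  }
}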
2024-12-07T01:24:50,877 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for eddea5b5796ff18b182d3d9754a0def0: Waiting for close lock at 1733534690876Disabling compacts and flushes for region at 1733534690876Disabling writes for close at 1733534690876Writing region close event to WAL at 1733534690877 (+1 ms)Closed at 1733534690877 2024-12-07T01:24:50,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741927_1106 (size=3346) 2024-12-07T01:24:50,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741927_1106 (size=3346) 2024-12-07T01:24:50,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741927_1106 (size=3346) 2024-12-07T01:24:50,895 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, size=3.3 K (3346bytes) 2024-12-07T01:24:50,895 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 2024-12-07T01:24:50,896 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 after 1ms 2024-12-07T01:24:50,898 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:50,898 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 took 3ms 2024-12-07T01:24:50,900 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 so closing down 2024-12-07T01:24:50,900 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:50,901 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733534690792.temp 2024-12-07T01:24:50,902 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp 2024-12-07T01:24:50,903 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:50,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741929_1108 (size=2944) 2024-12-07T01:24:50,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741929_1108 (size=2944) 2024-12-07T01:24:50,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741929_1108 
(size=2944) 2024-12-07T01:24:50,909 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:50,911 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 2024-12-07T01:24:50,911 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 12 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-12-07T01:24:50,911 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, journal: Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, size=3.3 K (3346bytes) at 1733534690895Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 so closing down at 1733534690900 (+5 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp at 1733534690902 (+2 ms)3 split writer threads finished at 1733534690903 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733534690909 (+6 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000003-wal.1733534690792.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 at 1733534690911 (+2 ms)Processed 32 edits across 1 Regions in 12 ms; skipped=2; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1733534690911 2024-12-07T01:24:50,913 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690792 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534690792 2024-12-07T01:24:50,913 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 2024-12-07T01:24:50,914 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:50,915 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:50,927 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, exclude list is [], retry=0 2024-12-07T01:24:50,929 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:50,930 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:50,930 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:50,931 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 2024-12-07T01:24:50,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:50,932 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => eddea5b5796ff18b182d3d9754a0def0, NAME => 'testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T01:24:50,932 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:50,932 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,932 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,932 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T01:24:50,934 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,935 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName a 2024-12-07T01:24:50,935 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,941 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/52959e14a1154de2b42aa6cab8485222 2024-12-07T01:24:50,941 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,941 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,942 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName b 2024-12-07T01:24:50,942 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,943 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,943 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,944 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName c 2024-12-07T01:24:50,944 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:50,944 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:50,944 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,945 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,946 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:50,947 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 2024-12-07T01:24:50,949 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:50,950 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 2024-12-07T01:24:50,950 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing eddea5b5796ff18b182d3d9754a0def0 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-07T01:24:50,969 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/75d0ed850e804f54b85fd39f5a2f0608 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733534690859/Put/seqid=0 2024-12-07T01:24:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741931_1110 (size=5958) 2024-12-07T01:24:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741931_1110 (size=5958) 2024-12-07T01:24:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741931_1110 (size=5958) 2024-12-07T01:24:50,976 INFO [Time-limited 
test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/75d0ed850e804f54b85fd39f5a2f0608 2024-12-07T01:24:50,995 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/e605970889f84b06b466731f90312382 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733534690865/Put/seqid=0 2024-12-07T01:24:51,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741932_1111 (size=5958) 2024-12-07T01:24:51,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741932_1111 (size=5958) 2024-12-07T01:24:51,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741932_1111 (size=5958) 2024-12-07T01:24:51,002 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/e605970889f84b06b466731f90312382 2024-12-07T01:24:51,006 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/75d0ed850e804f54b85fd39f5a2f0608 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/b/75d0ed850e804f54b85fd39f5a2f0608 2024-12-07T01:24:51,011 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/b/75d0ed850e804f54b85fd39f5a2f0608, entries=10, sequenceid=35, filesize=5.8 K 2024-12-07T01:24:51,012 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/e605970889f84b06b466731f90312382 as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/c/e605970889f84b06b466731f90312382 2024-12-07T01:24:51,017 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/c/e605970889f84b06b466731f90312382, entries=10, sequenceid=35, filesize=5.8 K 2024-12-07T01:24:51,017 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for eddea5b5796ff18b182d3d9754a0def0 in 67ms, sequenceid=35, compaction requested=false; wal=null 2024-12-07T01:24:51,018 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000035 2024-12-07T01:24:51,020 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:51,020 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up 
temporary data for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:51,021 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:51,022 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:51,024 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-07T01:24:51,025 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eddea5b5796ff18b182d3d9754a0def0; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67263894, jitterRate=0.002310127019882202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:51,025 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eddea5b5796ff18b182d3d9754a0def0: Writing region info on filesystem at 1733534690932Initializing all the Stores at 1733534690933 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690933Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690933Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534690933Obtaining lock to block concurrent updates at 1733534690950 (+17 ms)Preparing flush snapshotting stores in eddea5b5796ff18b182d3d9754a0def0 at 1733534690950Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733534690950Flushing stores of testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 
at 1733534690950Flushing eddea5b5796ff18b182d3d9754a0def0/b: creating writer at 1733534690950Flushing eddea5b5796ff18b182d3d9754a0def0/b: appending metadata at 1733534690968 (+18 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/b: closing flushed file at 1733534690968Flushing eddea5b5796ff18b182d3d9754a0def0/c: creating writer at 1733534690981 (+13 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/c: appending metadata at 1733534690994 (+13 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/c: closing flushed file at 1733534690994Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31ee1ead: reopening flushed file at 1733534691005 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74fab754: reopening flushed file at 1733534691012 (+7 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for eddea5b5796ff18b182d3d9754a0def0 in 67ms, sequenceid=35, compaction requested=false; wal=null at 1733534691017 (+5 ms)Cleaning up temporary data from old regions at 1733534691020 (+3 ms)Region opened successfully at 1733534691025 (+5 ms) 2024-12-07T01:24:51,096 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, size=0 (0bytes) 2024-12-07T01:24:51,096 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 might be still open, length is 0 2024-12-07T01:24:51,096 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 2024-12-07T01:24:51,097 WARN [IPC Server handler 1 on default port 42771 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-12-07T01:24:51,097 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 after 1ms 2024-12-07T01:24:52,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:42190 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42190 dst: /127.0.0.1:35777 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35777 remote=/127.0.0.1:42190]. Total timeout mills is 60000, 58812 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:52,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:48868 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:40681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48868 dst: /127.0.0.1:40681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:52,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_103124788_22 at /127.0.0.1:44530 [Receiving block BP-1847206258-172.17.0.3-1733534647641:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:38509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44530 dst: /127.0.0.1:38509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:52,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741930_1112 (size=2936) 2024-12-07T01:24:52,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741930_1112 (size=2936) 2024-12-07T01:24:52,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741930_1112 (size=2936) 2024-12-07T01:24:54,575 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-07T01:24:54,575 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:54,577 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-07T01:24:54,577 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-07T01:24:55,098 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 after 4002ms 2024-12-07T01:24:55,106 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:55,107 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 took 4011ms 2024-12-07T01:24:55,109 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from 
hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915; continuing. 2024-12-07T01:24:55,109 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 so closing down 2024-12-07T01:24:55,109 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T01:24:55,110 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733534690915.temp 2024-12-07T01:24:55,111 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp 2024-12-07T01:24:55,112 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T01:24:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741933_1113 (size=2944) 2024-12-07T01:24:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741933_1113 (size=2944) 2024-12-07T01:24:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741933_1113 (size=2944) 2024-12-07T01:24:55,121 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-07T01:24:55,122 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 2024-12-07T01:24:55,122 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T01:24:55,122 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, journal: Splitting hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, size=0 (0bytes) at 1733534691096Finishing writing output for hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 so closing down at 1733534695109 (+4013 ms)Creating recovered edits writer path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp at 1733534695111 (+2 ms)3 split writer threads finished at 1733534695112 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733534695121 (+9 ms)Rename recovered edits hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000037-wal.1733534690915.temp to hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 at 1733534695122 (+1 ms)Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915, size=0, length=0, corrupted=false, cancelled=false at 1733534695122 2024-12-07T01:24:55,123 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 to hdfs://localhost:42771/hbase/oldWALs/wal.1733534690915 2024-12-07T01:24:55,124 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 2024-12-07T01:24:55,124 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T01:24:55,126 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42771/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634, archiveDir=hdfs://localhost:42771/hbase/oldWALs, maxLogs=32 2024-12-07T01:24:55,137 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534695126, exclude list is [], retry=0 2024-12-07T01:24:55,139 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40681,DS-767c29ce-7be5-4668-b2c0-f46bb398d859,DISK] 2024-12-07T01:24:55,139 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35777,DS-c552b59d-4933-4b9c-acce-bfbec98f86fe,DISK] 2024-12-07T01:24:55,140 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38509,DS-226c85da-7fec-4c4f-9523-176d70b8f943,DISK] 2024-12-07T01:24:55,141 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534695126 2024-12-07T01:24:55,141 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36997:36997),(127.0.0.1/127.0.0.1:41487:41487),(127.0.0.1/127.0.0.1:40597:40597)] 2024-12-07T01:24:55,142 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T01:24:55,143 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,143 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName a 2024-12-07T01:24:55,144 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:55,148 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/52959e14a1154de2b42aa6cab8485222 2024-12-07T01:24:55,148 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:55,148 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,149 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName b 2024-12-07T01:24:55,150 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:55,154 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/b/75d0ed850e804f54b85fd39f5a2f0608 2024-12-07T01:24:55,154 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:55,154 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,155 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eddea5b5796ff18b182d3d9754a0def0 columnFamilyName c 2024-12-07T01:24:55,155 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T01:24:55,161 DEBUG [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/c/e605970889f84b06b466731f90312382 2024-12-07T01:24:55,161 INFO [StoreOpener-eddea5b5796ff18b182d3d9754a0def0-1 {}] regionserver.HStore(327): Store=eddea5b5796ff18b182d3d9754a0def0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T01:24:55,161 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,162 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,164 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,164 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 2024-12-07T01:24:55,166 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-12-07T01:24:55,169 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 2024-12-07T01:24:55,169 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing eddea5b5796ff18b182d3d9754a0def0 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-07T01:24:55,183 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/8bb5b9aae16648759b2da824e115c5ad is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733534691032/Put/seqid=0 2024-12-07T01:24:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741935_1115 (size=5958) 2024-12-07T01:24:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741935_1115 (size=5958) 2024-12-07T01:24:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741935_1115 (size=5958) 2024-12-07T01:24:55,190 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/8bb5b9aae16648759b2da824e115c5ad 2024-12-07T01:24:55,206 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/57b18a8eaaa446cba156bf916aac7fad is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733534691039/Put/seqid=0 2024-12-07T01:24:55,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741936_1116 (size=5958) 2024-12-07T01:24:55,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741936_1116 (size=5958) 2024-12-07T01:24:55,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741936_1116 (size=5958) 2024-12-07T01:24:55,213 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/57b18a8eaaa446cba156bf916aac7fad 2024-12-07T01:24:55,228 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/8a785aa95e164319bbaf2fef2f70be0b is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733534691046/Put/seqid=0 2024-12-07T01:24:55,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741937_1117 (size=5958) 2024-12-07T01:24:55,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741937_1117 
(size=5958) 2024-12-07T01:24:55,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741937_1117 (size=5958) 2024-12-07T01:24:55,235 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/8a785aa95e164319bbaf2fef2f70be0b 2024-12-07T01:24:55,240 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/a/8bb5b9aae16648759b2da824e115c5ad as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/8bb5b9aae16648759b2da824e115c5ad 2024-12-07T01:24:55,244 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/a/8bb5b9aae16648759b2da824e115c5ad, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T01:24:55,245 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/b/57b18a8eaaa446cba156bf916aac7fad as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/b/57b18a8eaaa446cba156bf916aac7fad 2024-12-07T01:24:55,250 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/b/57b18a8eaaa446cba156bf916aac7fad, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T01:24:55,251 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/.tmp/c/8a785aa95e164319bbaf2fef2f70be0b as hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/c/8a785aa95e164319bbaf2fef2f70be0b 2024-12-07T01:24:55,256 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/c/8a785aa95e164319bbaf2fef2f70be0b, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T01:24:55,257 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for eddea5b5796ff18b182d3d9754a0def0 in 88ms, sequenceid=66, compaction requested=false; wal=null 2024-12-07T01:24:55,257 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/0000000000000000066 2024-12-07T01:24:55,259 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,259 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,259 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T01:24:55,261 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for eddea5b5796ff18b182d3d9754a0def0 2024-12-07T01:24:55,263 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/hbase/data/default/testReplayEditsWrittenViaHRegion/eddea5b5796ff18b182d3d9754a0def0/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-07T01:24:55,264 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened eddea5b5796ff18b182d3d9754a0def0; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72894881, jitterRate=0.08621837198734283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T01:24:55,264 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for eddea5b5796ff18b182d3d9754a0def0: Writing region info on filesystem at 1733534695142Initializing all the Stores at 1733534695142Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534695142Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534695143 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733534695143Obtaining lock to block concurrent updates at 1733534695169 (+26 ms)Preparing flush snapshotting stores in eddea5b5796ff18b182d3d9754a0def0 at 1733534695169Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733534695169Flushing stores of testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 
at 1733534695169Flushing eddea5b5796ff18b182d3d9754a0def0/a: creating writer at 1733534695170 (+1 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/a: appending metadata at 1733534695182 (+12 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/a: closing flushed file at 1733534695182Flushing eddea5b5796ff18b182d3d9754a0def0/b: creating writer at 1733534695193 (+11 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/b: appending metadata at 1733534695206 (+13 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/b: closing flushed file at 1733534695206Flushing eddea5b5796ff18b182d3d9754a0def0/c: creating writer at 1733534695216 (+10 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/c: appending metadata at 1733534695228 (+12 ms)Flushing eddea5b5796ff18b182d3d9754a0def0/c: closing flushed file at 1733534695228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@341723b9: reopening flushed file at 1733534695239 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1285293f: reopening flushed file at 1733534695244 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a14f47: reopening flushed file at 1733534695250 (+6 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for eddea5b5796ff18b182d3d9754a0def0 in 88ms, sequenceid=66, compaction requested=false; wal=null at 1733534695257 (+7 ms)Cleaning up temporary data from old regions at 1733534695259 (+2 ms)Region opened successfully at 1733534695264 (+5 ms) 2024-12-07T01:24:55,277 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing eddea5b5796ff18b182d3d9754a0def0, disabling compactions & flushes 2024-12-07T01:24:55,277 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:55,277 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:55,277 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. after waiting 0 ms 2024-12-07T01:24:55,277 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 2024-12-07T01:24:55,279 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733534690636.eddea5b5796ff18b182d3d9754a0def0. 
2024-12-07T01:24:55,279 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for eddea5b5796ff18b182d3d9754a0def0: Waiting for close lock at 1733534695277Disabling compacts and flushes for region at 1733534695277Disabling writes for close at 1733534695277Writing region close event to WAL at 1733534695279 (+2 ms)Closed at 1733534695279 2024-12-07T01:24:55,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741934_1114 (size=95) 2024-12-07T01:24:55,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741934_1114 (size=95) 2024-12-07T01:24:55,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741934_1114 (size=95) 2024-12-07T01:24:55,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T01:24:55,283 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733534695126) 2024-12-07T01:24:55,295 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=440 (was 435) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42771 from 
jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-342659943_22 at /127.0.0.1:38404 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:42771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-342659943_22 at /127.0.0.1:47474 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-342659943_22 at /127.0.0.1:58388 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1398 (was 1332) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 221), ProcessCount=11 (was 11), AvailableMemoryMB=7572 (was 7576) 2024-12-07T01:24:55,295 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1398 is superior to 1024 2024-12-07T01:24:55,295 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T01:24:55,296 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T01:24:55,296 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T01:24:55,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,296 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-07T01:24:55,297 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T01:24:55,297 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=954470236, stopped=false 2024-12-07T01:24:55,297 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ec1863dc21e5,40763,1733534654247 2024-12-07T01:24:55,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:55,431 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:55,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:55,431 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:55,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T01:24:55,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:55,432 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T01:24:55,433 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T01:24:55,433 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T01:24:55,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,435 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ec1863dc21e5,45471,1733534655575' ***** 2024-12-07T01:24:55,435 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T01:24:55,435 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ec1863dc21e5,42825,1733534655854' ***** 2024-12-07T01:24:55,435 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T01:24:55,435 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:55,435 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42825-0x101ad6397820003, 
quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:55,435 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T01:24:55,435 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T01:24:55,435 INFO [RS:0;ec1863dc21e5:45471 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T01:24:55,436 INFO [RS:0;ec1863dc21e5:45471 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T01:24:55,436 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T01:24:55,436 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(3091): Received CLOSE for 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T01:24:55,436 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(959): stopping server ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(959): stopping server ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ec1863dc21e5:42825. 
2024-12-07T01:24:55,436 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T01:24:55,436 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T01:24:55,436 DEBUG [RS:2;ec1863dc21e5:42825 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T01:24:55,436 DEBUG [RS:2;ec1863dc21e5:42825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,436 INFO [RS:0;ec1863dc21e5:45471 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ec1863dc21e5:45471. 2024-12-07T01:24:55,436 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4df18ab8d6a713905826e338f7d67d7c, disabling compactions & flushes 2024-12-07T01:24:55,436 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:55,436 DEBUG [RS:0;ec1863dc21e5:45471 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T01:24:55,436 DEBUG [RS:0;ec1863dc21e5:45471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,436 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T01:24:55,436 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:55,437 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T01:24:55,437 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. after waiting 0 ms 2024-12-07T01:24:55,437 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:55,437 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T01:24:55,437 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T01:24:55,437 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1325): Online Regions={4df18ab8d6a713905826e338f7d67d7c=testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c.} 2024-12-07T01:24:55,437 DEBUG [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1351): Waiting on 4df18ab8d6a713905826e338f7d67d7c 2024-12-07T01:24:55,437 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T01:24:55,437 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T01:24:55,437 DEBUG [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T01:24:55,437 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T01:24:55,437 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T01:24:55,437 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T01:24:55,438 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T01:24:55,438 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T01:24:55,438 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-12-07T01:24:55,442 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4df18ab8d6a713905826e338f7d67d7c/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-07T01:24:55,443 INFO [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 2024-12-07T01:24:55,443 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4df18ab8d6a713905826e338f7d67d7c: Waiting for close lock at 1733534695436Running coprocessor pre-close hooks at 1733534695436Disabling compacts and flushes for region at 1733534695436Disabling writes for close at 1733534695437 (+1 ms)Writing region close event to WAL at 1733534695439 (+2 ms)Running coprocessor post-close hooks at 1733534695443 (+4 ms)Closed at 1733534695443 2024-12-07T01:24:55,443 DEBUG [RS_CLOSE_REGION-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c. 
2024-12-07T01:24:55,456 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/info/36157e9380304dedac7bf4fdc47c6e78 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733534672069.4df18ab8d6a713905826e338f7d67d7c./info:regioninfo/1733534675279/Put/seqid=0 2024-12-07T01:24:55,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741938_1118 (size=8243) 2024-12-07T01:24:55,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741938_1118 (size=8243) 2024-12-07T01:24:55,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741938_1118 (size=8243) 2024-12-07T01:24:55,463 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/info/36157e9380304dedac7bf4fdc47c6e78 2024-12-07T01:24:55,482 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/ns/8211b25b8cf94db18b3775ceda9e39be is 43, key is default/ns:d/1733534659282/Put/seqid=0 2024-12-07T01:24:55,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741939_1119 (size=5153) 2024-12-07T01:24:55,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741939_1119 (size=5153) 2024-12-07T01:24:55,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741939_1119 (size=5153) 2024-12-07T01:24:55,488 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/ns/8211b25b8cf94db18b3775ceda9e39be 2024-12-07T01:24:55,489 INFO [regionserver/ec1863dc21e5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:55,489 INFO [regionserver/ec1863dc21e5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:55,506 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/table/09f562068a9948c2ab0f0d6b15bf009c is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733534672501/Put/seqid=0 2024-12-07T01:24:55,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741940_1120 (size=5431) 2024-12-07T01:24:55,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40681 is added to blk_1073741940_1120 (size=5431) 2024-12-07T01:24:55,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741940_1120 (size=5431) 2024-12-07T01:24:55,512 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/table/09f562068a9948c2ab0f0d6b15bf009c 2024-12-07T01:24:55,517 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/info/36157e9380304dedac7bf4fdc47c6e78 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/info/36157e9380304dedac7bf4fdc47c6e78 2024-12-07T01:24:55,522 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/info/36157e9380304dedac7bf4fdc47c6e78, entries=18, sequenceid=21, filesize=8.0 K 2024-12-07T01:24:55,523 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/ns/8211b25b8cf94db18b3775ceda9e39be as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/ns/8211b25b8cf94db18b3775ceda9e39be 2024-12-07T01:24:55,527 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/ns/8211b25b8cf94db18b3775ceda9e39be, entries=2, sequenceid=21, filesize=5.0 K 2024-12-07T01:24:55,529 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/.tmp/table/09f562068a9948c2ab0f0d6b15bf009c as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/table/09f562068a9948c2ab0f0d6b15bf009c 2024-12-07T01:24:55,533 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/table/09f562068a9948c2ab0f0d6b15bf009c, entries=2, sequenceid=21, filesize=5.3 K 2024-12-07T01:24:55,534 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=21, compaction requested=false 2024-12-07T01:24:55,538 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-07T01:24:55,539 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T01:24:55,539 INFO [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T01:24:55,539 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733534695437Running coprocessor pre-close hooks at 1733534695437Disabling compacts and flushes for region at 1733534695437Disabling writes for close at 1733534695438 (+1 ms)Obtaining lock to block concurrent updates at 1733534695438Preparing flush snapshotting stores in 1588230740 at 1733534695438Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1733534695438Flushing stores of hbase:meta,,1.1588230740 at 1733534695439 (+1 ms)Flushing 1588230740/info: creating writer at 1733534695439Flushing 1588230740/info: appending metadata at 1733534695456 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733534695456Flushing 1588230740/ns: creating writer at 1733534695468 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733534695481 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733534695481Flushing 1588230740/table: creating writer at 1733534695492 (+11 ms)Flushing 1588230740/table: appending metadata at 1733534695505 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733534695505Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@aa62b1c: reopening flushed file at 1733534695516 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a5d2cf0: reopening flushed file at 1733534695522 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52e65ca0: reopening flushed file at 1733534695528 (+6 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=21, compaction requested=false at 1733534695534 (+6 ms)Writing region close event to WAL at 1733534695536 (+2 ms)Running coprocessor post-close hooks at 1733534695539 (+3 ms)Closed at 1733534695539 2024-12-07T01:24:55,539 DEBUG [RS_CLOSE_META-regionserver/ec1863dc21e5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T01:24:55,637 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(976): stopping server ec1863dc21e5,45471,1733534655575; all regions closed. 2024-12-07T01:24:55,638 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(976): stopping server ec1863dc21e5,42825,1733534655854; all regions closed. 
2024-12-07T01:24:55,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741834_1010 (size=2187) 2024-12-07T01:24:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741836_1012 (size=4709) 2024-12-07T01:24:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741834_1010 (size=2187) 2024-12-07T01:24:55,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741836_1012 (size=4709) 2024-12-07T01:24:55,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741836_1012 (size=4709) 2024-12-07T01:24:55,647 DEBUG [RS:0;ec1863dc21e5:45471 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs 2024-12-07T01:24:55,647 INFO [RS:0;ec1863dc21e5:45471 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ec1863dc21e5%2C45471%2C1733534655575:(num 1733534658407) 2024-12-07T01:24:55,647 DEBUG [RS:0;ec1863dc21e5:45471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,647 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:55,647 DEBUG [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs 2024-12-07T01:24:55,648 INFO [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ec1863dc21e5%2C42825%2C1733534655854.meta:.meta(num 1733534659118) 2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.ChoreService(370): Chore service for: regionserver/ec1863dc21e5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T01:24:55,648 INFO [regionserver/ec1863dc21e5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T01:24:55,648 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T01:24:55,649 INFO [RS:0;ec1863dc21e5:45471 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45471 2024-12-07T01:24:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741835_1011 (size=95) 2024-12-07T01:24:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741835_1011 (size=95) 2024-12-07T01:24:55,653 DEBUG [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/oldWALs 2024-12-07T01:24:55,653 INFO [RS:2;ec1863dc21e5:42825 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ec1863dc21e5%2C42825%2C1733534655854:(num 1733534658407) 2024-12-07T01:24:55,653 DEBUG [RS:2;ec1863dc21e5:42825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T01:24:55,653 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T01:24:55,653 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T01:24:55,653 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.ChoreService(370): Chore service for: regionserver/ec1863dc21e5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T01:24:55,653 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T01:24:55,653 INFO [regionserver/ec1863dc21e5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T01:24:55,654 INFO [RS:2;ec1863dc21e5:42825 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42825 2024-12-07T01:24:55,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ec1863dc21e5,45471,1733534655575 2024-12-07T01:24:55,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T01:24:55,665 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T01:24:55,675 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ec1863dc21e5,42825,1733534655854 2024-12-07T01:24:55,675 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T01:24:55,686 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ec1863dc21e5,42825,1733534655854] 2024-12-07T01:24:55,707 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ec1863dc21e5,42825,1733534655854 already deleted, retry=false 2024-12-07T01:24:55,707 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ec1863dc21e5,42825,1733534655854 expired; onlineServers=1 2024-12-07T01:24:55,707 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ec1863dc21e5,45471,1733534655575] 2024-12-07T01:24:55,718 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ec1863dc21e5,45471,1733534655575 already deleted, retry=false 2024-12-07T01:24:55,718 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ec1863dc21e5,45471,1733534655575 expired; onlineServers=0 2024-12-07T01:24:55,718 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ec1863dc21e5,40763,1733534654247' ***** 2024-12-07T01:24:55,718 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T01:24:55,718 INFO [M:0;ec1863dc21e5:40763 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T01:24:55,718 INFO [M:0;ec1863dc21e5:40763 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T01:24:55,719 DEBUG [M:0;ec1863dc21e5:40763 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T01:24:55,719 DEBUG [M:0;ec1863dc21e5:40763 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T01:24:55,719 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T01:24:55,719 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.small.0-1733534657678 {}] cleaner.HFileCleaner(306): Exit Thread[master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.small.0-1733534657678,5,FailOnTimeoutGroup] 2024-12-07T01:24:55,719 DEBUG [master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.large.0-1733534657674 {}] cleaner.HFileCleaner(306): Exit Thread[master/ec1863dc21e5:0:becomeActiveMaster-HFileCleaner.large.0-1733534657674,5,FailOnTimeoutGroup] 2024-12-07T01:24:55,719 INFO [M:0;ec1863dc21e5:40763 {}] hbase.ChoreService(370): Chore service for: master/ec1863dc21e5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T01:24:55,720 INFO [M:0;ec1863dc21e5:40763 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T01:24:55,720 DEBUG [M:0;ec1863dc21e5:40763 {}] master.HMaster(1795): Stopping service threads 2024-12-07T01:24:55,720 INFO [M:0;ec1863dc21e5:40763 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T01:24:55,720 INFO [M:0;ec1863dc21e5:40763 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T01:24:55,721 INFO [M:0;ec1863dc21e5:40763 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T01:24:55,721 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T01:24:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T01:24:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T01:24:55,729 DEBUG [M:0;ec1863dc21e5:40763 {}] zookeeper.ZKUtil(347): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T01:24:55,729 WARN [M:0;ec1863dc21e5:40763 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T01:24:55,730 INFO [M:0;ec1863dc21e5:40763 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/.lastflushedseqids 2024-12-07T01:24:55,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741941_1121 (size=138) 2024-12-07T01:24:55,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741941_1121 (size=138) 2024-12-07T01:24:55,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741941_1121 (size=138) 2024-12-07T01:24:55,746 INFO [M:0;ec1863dc21e5:40763 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T01:24:55,746 INFO [M:0;ec1863dc21e5:40763 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T01:24:55,747 DEBUG 
[M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T01:24:55,747 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:55,747 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:55,747 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T01:24:55,747 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:55,747 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.33 KB heapSize=83.72 KB 2024-12-07T01:24:55,762 DEBUG [M:0;ec1863dc21e5:40763 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a546119250c845bdacf50c1da73864f8 is 82, key is hbase:meta,,1/info:regioninfo/1733534659198/Put/seqid=0 2024-12-07T01:24:55,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741942_1122 (size=5672) 2024-12-07T01:24:55,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741942_1122 (size=5672) 2024-12-07T01:24:55,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741942_1122 (size=5672) 2024-12-07T01:24:55,768 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a546119250c845bdacf50c1da73864f8 2024-12-07T01:24:55,786 DEBUG [M:0;ec1863dc21e5:40763 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb336e6adcf145dda10cea57b0b9c439 is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733534672506/Put/seqid=0 2024-12-07T01:24:55,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45471-0x101ad6397820001, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,786 INFO [RS:0;ec1863dc21e5:45471 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T01:24:55,786 INFO [RS:0;ec1863dc21e5:45471 {}] regionserver.HRegionServer(1031): Exiting; stopping=ec1863dc21e5,45471,1733534655575; zookeeper connection closed. 
2024-12-07T01:24:55,787 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@164bfee9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@164bfee9 2024-12-07T01:24:55,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741943_1123 (size=7754) 2024-12-07T01:24:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741943_1123 (size=7754) 2024-12-07T01:24:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741943_1123 (size=7754) 2024-12-07T01:24:55,792 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb336e6adcf145dda10cea57b0b9c439 2024-12-07T01:24:55,797 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb336e6adcf145dda10cea57b0b9c439 2024-12-07T01:24:55,797 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,797 DEBUG [pool-76-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101ad6397820003, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,797 INFO [RS:2;ec1863dc21e5:42825 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T01:24:55,797 INFO [RS:2;ec1863dc21e5:42825 {}] regionserver.HRegionServer(1031): Exiting; stopping=ec1863dc21e5,42825,1733534655854; zookeeper connection closed. 
2024-12-07T01:24:55,797 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@55b2addd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@55b2addd 2024-12-07T01:24:55,798 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T01:24:55,810 DEBUG [M:0;ec1863dc21e5:40763 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7ecc492596f4f3f850d7adab7780d78 is 69, key is ec1863dc21e5,42825,1733534655854/rs:state/1733534657709/Put/seqid=0 2024-12-07T01:24:55,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741944_1124 (size=5445) 2024-12-07T01:24:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741944_1124 (size=5445) 2024-12-07T01:24:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741944_1124 (size=5445) 2024-12-07T01:24:55,816 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7ecc492596f4f3f850d7adab7780d78 2024-12-07T01:24:55,820 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f7ecc492596f4f3f850d7adab7780d78 2024-12-07T01:24:55,821 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a546119250c845bdacf50c1da73864f8 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a546119250c845bdacf50c1da73864f8 2024-12-07T01:24:55,826 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a546119250c845bdacf50c1da73864f8, entries=8, sequenceid=168, filesize=5.5 K 2024-12-07T01:24:55,827 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb336e6adcf145dda10cea57b0b9c439 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb336e6adcf145dda10cea57b0b9c439 2024-12-07T01:24:55,832 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb336e6adcf145dda10cea57b0b9c439 2024-12-07T01:24:55,832 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb336e6adcf145dda10cea57b0b9c439, entries=17, sequenceid=168, filesize=7.6 K 2024-12-07T01:24:55,833 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7ecc492596f4f3f850d7adab7780d78 as hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7ecc492596f4f3f850d7adab7780d78 2024-12-07T01:24:55,838 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f7ecc492596f4f3f850d7adab7780d78 2024-12-07T01:24:55,838 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42771/user/jenkins/test-data/6db8a2b6-9f51-bf6f-8040-c5108cfc8fee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7ecc492596f4f3f850d7adab7780d78, entries=3, sequenceid=168, filesize=5.3 K 2024-12-07T01:24:55,839 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 92ms, sequenceid=168, compaction requested=false 2024-12-07T01:24:55,840 INFO [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T01:24:55,840 DEBUG [M:0;ec1863dc21e5:40763 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733534695747Disabling compacts and flushes for region at 1733534695747Disabling writes for close at 1733534695747Obtaining lock to block concurrent updates at 1733534695747Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733534695747Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69972, getHeapSize=85664, getOffHeapSize=0, getCellsCount=195 at 1733534695747Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733534695748 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733534695748Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733534695761 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733534695761Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733534695772 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733534695785 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733534695785Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733534695797 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733534695809 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733534695809Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@350abd0b: reopening flushed file at 1733534695820 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@917d9aa: reopening flushed file at 1733534695826 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f95c28a: reopening flushed file at 1733534695832 (+6 ms)Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 92ms, sequenceid=168, compaction requested=false at 1733534695839 (+7 ms)Writing region close event to WAL at 1733534695840 (+1 ms)Closed at 1733534695840 2024-12-07T01:24:55,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40681 is added to blk_1073741830_1006 (size=56576) 2024-12-07T01:24:55,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38509 is added to blk_1073741830_1006 (size=56576) 2024-12-07T01:24:55,843 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T01:24:55,843 INFO [M:0;ec1863dc21e5:40763 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T01:24:55,843 INFO [M:0;ec1863dc21e5:40763 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40763 2024-12-07T01:24:55,844 INFO [M:0;ec1863dc21e5:40763 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T01:24:55,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,955 INFO [M:0;ec1863dc21e5:40763 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T01:24:55,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40763-0x101ad6397820000, quorum=127.0.0.1:59844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T01:24:55,960 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675990 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675990 (inode 16655) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733534675563/wal.1733534675990 (inode 16655) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-07T01:24:55,961 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733534667588/wal.1733534667793 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:55,961 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733534690634/wal.1733534690915 with renewLeaseKey: DEFAULT_16767 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:55,963 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534685437 with renewLeaseKey: DEFAULT_16678 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534685437 (inode 16678) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733534676154/wal.1733534685437 (inode 16678) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-07T01:24:55,964 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733534660702/wal.1733534660779 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:55,966 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534686070 with renewLeaseKey: DEFAULT_16704 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534686070 (inode 16704) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733534685625/wal.1733534686070 (inode 16704) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-07T01:24:55,967 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733534686221/wal.1733534686278 with renewLeaseKey: DEFAULT_16726 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T01:24:55,969 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal.1733534660576 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal.1733534660576 (inode 16485) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733534660394/wal.1733534660576 (inode 16485) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-07T01:24:55,972 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal.1733534660177 with renewLeaseKey: DEFAULT_16462 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal.1733534660177 (inode 16462) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733534659542/wal.1733534660177 (inode 16462) Holder DFSClient_NONMAPREDUCE_103124788_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor4.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-07T01:24:55,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f1150e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T01:24:55,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T01:24:55,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T01:24:55,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T01:24:55,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,STOPPED} 2024-12-07T01:24:55,980 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
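Every "Failed to close file" ERROR above follows the same force-close chain recorded in its trace: MiniDFSCluster.shutdown -> DistributedFileSystem.close -> DFSClient.closeAllFilesBeingWritten -> DummyDFSOutputStream.close -> FanOutOneBlockAsyncDFSOutput.close, which then fails with either "stream already broken" or a FileNotFoundException because the WAL file was already completed or deleted. The minimal Java sketch below only illustrates the underlying HDFS contract those errors point at; the path, payload, and class name are hypothetical and not taken from this log.

```java
// Hypothetical illustration (not code from this test run): an HDFS output stream
// that is closed explicitly before FileSystem#close never reaches
// DFSClient#closeAllFilesBeingWritten, the force-close path seen in the traces above.
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WalCloseBeforeShutdownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);             // whatever HDFS the test started
    Path wal = new Path("/hbase/WALs/example/wal.0"); // hypothetical WAL path

    FSDataOutputStream out = fs.create(wal);
    try {
      out.write("edit".getBytes(StandardCharsets.UTF_8));
      out.hflush(); // hand the bytes to the DataNode pipeline
    } finally {
      out.close();  // completes the file at the NameNode while the lease is still valid
    }

    // With the stream already closed, shutting down the FileSystem (and the mini
    // cluster behind it) has nothing left to force-close.
    fs.close();
  }
}
```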
2024-12-07T01:24:55,980 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T01:24:55,980 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1847206258-172.17.0.3-1733534647641 (Datanode Uuid 12105a20-257c-43a8-82d6-b8265e6e70c2) service to localhost/127.0.0.1:42771 2024-12-07T01:24:55,980 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T01:24:55,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data5/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T01:24:55,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data6/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T01:24:55,981 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T01:24:55,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd427b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T01:24:55,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T01:24:55,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T01:24:55,984 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T01:24:55,984 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,STOPPED} 2024-12-07T01:24:55,985 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T01:24:55,985 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1847206258-172.17.0.3-1733534647641 (Datanode Uuid cb8add91-94da-4995-a3f1-fe13c01d1ad0) service to localhost/127.0.0.1:42771 2024-12-07T01:24:55,985 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T01:24:55,985 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T01:24:55,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data3/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T01:24:55,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data4/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T01:24:55,986 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T01:24:55,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330740de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T01:24:55,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T01:24:55,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T01:24:55,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T01:24:55,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,STOPPED}
2024-12-07T01:24:55,989 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T01:24:55,989 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T01:24:55,989 WARN [BP-1847206258-172.17.0.3-1733534647641 heartbeating to localhost/127.0.0.1:42771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1847206258-172.17.0.3-1733534647641 (Datanode Uuid 3bfca5df-b42a-4eb5-9c04-fc996d72f988) service to localhost/127.0.0.1:42771
2024-12-07T01:24:55,989 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T01:24:55,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data1/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T01:24:55,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/cluster_cdda0476-f4e5-e691-b763-dd002d7cf315/data/data2/current/BP-1847206258-172.17.0.3-1733534647641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T01:24:55,990 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T01:24:55,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T01:24:55,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T01:24:55,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T01:24:55,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T01:24:55,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2037105d-39fa-1170-b3e2-2dd24022b89c/hadoop.log.dir/,STOPPED}
2024-12-07T01:24:56,006 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T01:24:56,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down