2024-12-05 13:44:32,991 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-05 13:44:33,005 main DEBUG Took 0.011310 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-05 13:44:33,005 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-05 13:44:33,006 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-05 13:44:33,007 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-05 13:44:33,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,018 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-05 13:44:33,032 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,033 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,034 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,034 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,035 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,035 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,036 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,037 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,037 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,038 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,039 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,039 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,040 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,040 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,041 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,041 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,042 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,042 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,043 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,043 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,044 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,044 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,045 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,045 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 13:44:33,046 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,046 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-05 13:44:33,048 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 13:44:33,050 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-05 13:44:33,051 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-05 13:44:33,052 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-05 13:44:33,053 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-05 13:44:33,054 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-05 13:44:33,063 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-05 13:44:33,065 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-05 13:44:33,067 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-05 13:44:33,067 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-05 13:44:33,068 main DEBUG createAppenders(={Console})
2024-12-05 13:44:33,069 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized
2024-12-05 13:44:33,069 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-05 13:44:33,070 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK.
2024-12-05 13:44:33,070 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-05 13:44:33,071 main DEBUG OutputStream closed
2024-12-05 13:44:33,071 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-05 13:44:33,071 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-05 13:44:33,072 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK
2024-12-05 13:44:33,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-05 13:44:33,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-05 13:44:33,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-05 13:44:33,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-05 13:44:33,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-05 13:44:33,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-05 13:44:33,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-05 13:44:33,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-05 13:44:33,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-05 13:44:33,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-05 13:44:33,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-05 13:44:33,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-05 13:44:33,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-05 13:44:33,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-05 13:44:33,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-05 13:44:33,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-05 13:44:33,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-05 13:44:33,163 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-05 13:44:33,166 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05 13:44:33,166 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null
2024-12-05 13:44:33,166 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-05 13:44:33,167 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK.
2024-12-05T13:44:33,368 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f
2024-12-05 13:44:33,372 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-05 13:44:33,372 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05T13:44:33,380 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins
2024-12-05T13:44:33,387 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayCompressed timeout: 13 mins
2024-12-05T13:44:33,407 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-05T13:44:33,444 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-05T13:44:33,445 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-05T13:44:33,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-05T13:44:33,466 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de, deleteOnExit=true
2024-12-05T13:44:33,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-05T13:44:33,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/test.cache.data in system properties and HBase conf
2024-12-05T13:44:33,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.tmp.dir in system properties and HBase conf
2024-12-05T13:44:33,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir in system properties and HBase conf
2024-12-05T13:44:33,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-05T13:44:33,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-05T13:44:33,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-05T13:44:33,544 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-05T13:44:33,623 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-05T13:44:33,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-05T13:44:33,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-05T13:44:33,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-05T13:44:33,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T13:44:33,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-05T13:44:33,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-05T13:44:33,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T13:44:33,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T13:44:33,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-05T13:44:33,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/nfs.dump.dir in system properties and HBase conf
2024-12-05T13:44:33,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/java.io.tmpdir in system properties and HBase conf
2024-12-05T13:44:33,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T13:44:33,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-05T13:44:33,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-05T13:44:34,532 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-05T13:44:34,602 INFO [Time-limited test {}] log.Log(170): Logging initialized @2351ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-05T13:44:34,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T13:44:34,730 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T13:44:34,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T13:44:34,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T13:44:34,755 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T13:44:34,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T13:44:34,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6630be9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,AVAILABLE}
2024-12-05T13:44:34,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70596a48{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T13:44:34,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1843526c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/java.io.tmpdir/jetty-localhost-42973-hadoop-hdfs-3_4_1-tests_jar-_-any-7583334297657115832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T13:44:34,925 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@776c4d96{HTTP/1.1, (http/1.1)}{localhost:42973}
2024-12-05T13:44:34,925 INFO [Time-limited test {}] server.Server(415): Started @2674ms
2024-12-05T13:44:35,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T13:44:35,398 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T13:44:35,399 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T13:44:35,399 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T13:44:35,399 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T13:44:35,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b895d95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,AVAILABLE}
2024-12-05T13:44:35,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d33512e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T13:44:35,495 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4dc44ec6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/java.io.tmpdir/jetty-localhost-42937-hadoop-hdfs-3_4_1-tests_jar-_-any-1434390692489057100/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:44:35,496 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17f6e478{HTTP/1.1, (http/1.1)}{localhost:42937}
2024-12-05T13:44:35,496 INFO [Time-limited test {}] server.Server(415): Started @3246ms
2024-12-05T13:44:35,542 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T13:44:35,652 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T13:44:35,657 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T13:44:35,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T13:44:35,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T13:44:35,671 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T13:44:35,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e971547{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,AVAILABLE}
2024-12-05T13:44:35,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3264f7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T13:44:35,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28e1ba78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/java.io.tmpdir/jetty-localhost-41249-hadoop-hdfs-3_4_1-tests_jar-_-any-4060721088672646507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:44:35,777 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@702c0733{HTTP/1.1, (http/1.1)}{localhost:41249}
2024-12-05T13:44:35,777 INFO [Time-limited test {}] server.Server(415): Started @3527ms
2024-12-05T13:44:35,779 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T13:44:35,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T13:44:35,813 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T13:44:35,815 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T13:44:35,815 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T13:44:35,815 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T13:44:35,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf0fdb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,AVAILABLE}
2024-12-05T13:44:35,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6853f5bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T13:44:35,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4290616c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/java.io.tmpdir/jetty-localhost-37189-hadoop-hdfs-3_4_1-tests_jar-_-any-9530759946520208652/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:44:35,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e25d2b6{HTTP/1.1, (http/1.1)}{localhost:37189}
2024-12-05T13:44:35,911 INFO [Time-limited test {}] server.Server(415): Started @3661ms
2024-12-05T13:44:35,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T13:44:36,736 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data4/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,736 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data3/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,736 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data1/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,736 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data2/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,767 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T13:44:36,767 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T13:44:36,789 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data5/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,789 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data6/current/BP-1806447417-172.17.0.2-1733406274111/current, will proceed with Du for space computation calculation,
2024-12-05T13:44:36,810 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T13:44:36,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9435bec3b3582f2b with lease ID 0x85a1cd3dcc3f12c2: Processing first storage report for DS-d6728e3a-655f-4055-aaee-849d4009aeae from datanode DatanodeRegistration(127.0.0.1:38521, datanodeUuid=96498956-7bd8-4587-ac7d-05a24b9825d4, infoPort=36047, infoSecurePort=0, ipcPort=41445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9435bec3b3582f2b with lease ID 0x85a1cd3dcc3f12c2: from storage DS-d6728e3a-655f-4055-aaee-849d4009aeae node DatanodeRegistration(127.0.0.1:38521, datanodeUuid=96498956-7bd8-4587-ac7d-05a24b9825d4, infoPort=36047, infoSecurePort=0, ipcPort=41445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd10473bfb3120568 with lease ID 0x85a1cd3dcc3f12c1: Processing first storage report for DS-02bc0856-58fd-483a-b6da-15cdad22a168 from datanode DatanodeRegistration(127.0.0.1:43927, datanodeUuid=aece2e20-6afc-40e8-a0a7-d473ec6b7265, infoPort=43707, infoSecurePort=0, ipcPort=36363, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd10473bfb3120568 with lease ID 0x85a1cd3dcc3f12c1: from storage DS-02bc0856-58fd-483a-b6da-15cdad22a168 node DatanodeRegistration(127.0.0.1:43927, datanodeUuid=aece2e20-6afc-40e8-a0a7-d473ec6b7265, infoPort=43707, infoSecurePort=0, ipcPort=36363, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b40002648831f3 with lease ID 0x85a1cd3dcc3f12c3: Processing first storage report for DS-bd01db56-461b-482d-8826-5939344b09cb from datanode DatanodeRegistration(127.0.0.1:44137, datanodeUuid=9755e825-eb33-44d2-b506-195533761fcc, infoPort=45355, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b40002648831f3 with lease ID 0x85a1cd3dcc3f12c3: from storage DS-bd01db56-461b-482d-8826-5939344b09cb node DatanodeRegistration(127.0.0.1:44137, datanodeUuid=9755e825-eb33-44d2-b506-195533761fcc, infoPort=45355, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9435bec3b3582f2b with lease ID 0x85a1cd3dcc3f12c2: Processing first storage report for DS-f28ca866-a5b7-418f-9de7-bd619d7c4167 from datanode DatanodeRegistration(127.0.0.1:38521, datanodeUuid=96498956-7bd8-4587-ac7d-05a24b9825d4, infoPort=36047, infoSecurePort=0, ipcPort=41445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9435bec3b3582f2b with lease ID 0x85a1cd3dcc3f12c2: from storage DS-f28ca866-a5b7-418f-9de7-bd619d7c4167 node DatanodeRegistration(127.0.0.1:38521, datanodeUuid=96498956-7bd8-4587-ac7d-05a24b9825d4, infoPort=36047, infoSecurePort=0, ipcPort=41445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd10473bfb3120568 with lease ID 0x85a1cd3dcc3f12c1: Processing first storage report for DS-9fb0b230-d143-4ecc-aac9-443102289f9d from datanode DatanodeRegistration(127.0.0.1:43927, datanodeUuid=aece2e20-6afc-40e8-a0a7-d473ec6b7265, infoPort=43707, infoSecurePort=0, ipcPort=36363, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd10473bfb3120568 with lease ID 0x85a1cd3dcc3f12c1: from storage DS-9fb0b230-d143-4ecc-aac9-443102289f9d node DatanodeRegistration(127.0.0.1:43927, datanodeUuid=aece2e20-6afc-40e8-a0a7-d473ec6b7265, infoPort=43707, infoSecurePort=0, ipcPort=36363, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b40002648831f3 with lease ID 0x85a1cd3dcc3f12c3: Processing first storage report for DS-95d61a72-d7a9-4807-b8cb-696595e20862 from datanode DatanodeRegistration(127.0.0.1:44137, datanodeUuid=9755e825-eb33-44d2-b506-195533761fcc, infoPort=45355, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111)
2024-12-05T13:44:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b40002648831f3 with lease ID 0x85a1cd3dcc3f12c3: from storage DS-95d61a72-d7a9-4807-b8cb-696595e20862 node DatanodeRegistration(127.0.0.1:44137, datanodeUuid=9755e825-eb33-44d2-b506-195533761fcc, infoPort=45355, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1137224814;c=1733406274111), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T13:44:36,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f
2024-12-05T13:44:36,901 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/zookeeper_0, clientPort=53425, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-05T13:44:36,909 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53425
2024-12-05T13:44:36,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:36,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:37,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741825_1001 (size=7)
2024-12-05T13:44:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741825_1001 (size=7)
2024-12-05T13:44:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741825_1001 (size=7)
2024-12-05T13:44:37,515 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 with version=8
2024-12-05T13:44:37,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/hbase-staging
2024-12-05T13:44:37,803 INFO [Time-limited test {}] client.ConnectionUtils(128): master/da6aa2204f50:0 server-side Connection retries=45
2024-12-05T13:44:37,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:37,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:37,817 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T13:44:37,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:37,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T13:44:37,951 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-05T13:44:38,013 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-05T13:44:38,023 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-05T13:44:38,027 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T13:44:38,055 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 70265 (auto-detected)
2024-12-05T13:44:38,056 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-05T13:44:38,075 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39625
2024-12-05T13:44:38,094 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39625 connecting to ZooKeeper ensemble=127.0.0.1:53425
2024-12-05T13:44:38,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396250x0, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T13:44:38,206 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39625-0x101a7065f890000 connected
2024-12-05T13:44:38,301 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,317 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T13:44:38,321 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13, hbase.cluster.distributed=false
2024-12-05T13:44:38,340 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T13:44:38,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39625
2024-12-05T13:44:38,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39625
2024-12-05T13:44:38,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39625
2024-12-05T13:44:38,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39625
2024-12-05T13:44:38,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39625
2024-12-05T13:44:38,430 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/da6aa2204f50:0 server-side Connection retries=45
2024-12-05T13:44:38,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,432 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T13:44:38,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T13:44:38,435 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T13:44:38,437 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T13:44:38,437 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43377
2024-12-05T13:44:38,439 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43377 connecting to ZooKeeper ensemble=127.0.0.1:53425
2024-12-05T13:44:38,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,442 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433770x0, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T13:44:38,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433770x0, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T13:44:38,456 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43377-0x101a7065f890001 connected
2024-12-05T13:44:38,460 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T13:44:38,467 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T13:44:38,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T13:44:38,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T13:44:38,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43377
2024-12-05T13:44:38,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43377
2024-12-05T13:44:38,475 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43377
2024-12-05T13:44:38,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43377
2024-12-05T13:44:38,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43377
2024-12-05T13:44:38,491 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/da6aa2204f50:0 server-side Connection retries=45
2024-12-05T13:44:38,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,492 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T13:44:38,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T13:44:38,492 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T13:44:38,493 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T13:44:38,493 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34361
2024-12-05T13:44:38,495 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34361 connecting to ZooKeeper ensemble=127.0.0.1:53425
2024-12-05T13:44:38,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,498 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343610x0, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T13:44:38,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:343610x0, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T13:44:38,509 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34361-0x101a7065f890002 connected
2024-12-05T13:44:38,509 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T13:44:38,510 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T13:44:38,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T13:44:38,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T13:44:38,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34361
2024-12-05T13:44:38,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34361
2024-12-05T13:44:38,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34361
2024-12-05T13:44:38,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34361
2024-12-05T13:44:38,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34361
2024-12-05T13:44:38,529 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/da6aa2204f50:0 server-side Connection retries=45
2024-12-05T13:44:38,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,529 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T13:44:38,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T13:44:38,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T13:44:38,530 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T13:44:38,530 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T13:44:38,530 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42407
2024-12-05T13:44:38,532 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42407 connecting to ZooKeeper ensemble=127.0.0.1:53425
2024-12-05T13:44:38,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T13:44:38,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424070x0, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T13:44:38,547 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424070x0, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T13:44:38,547 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42407-0x101a7065f890003 connected
2024-12-05T13:44:38,548 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T13:44:38,549 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T13:44:38,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T13:44:38,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T13:44:38,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42407
2024-12-05T13:44:38,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42407
2024-12-05T13:44:38,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42407
2024-12-05T13:44:38,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42407
2024-12-05T13:44:38,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42407
2024-12-05T13:44:38,568 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;da6aa2204f50:39625
2024-12-05T13:44:38,569 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/da6aa2204f50,39625,1733406277651
2024-12-05T13:44:38,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,585 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/da6aa2204f50,39625,1733406277651
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T13:44:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T13:44:38,618 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-05T13:44:38,620 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/da6aa2204f50,39625,1733406277651 from backup master directory
2024-12-05T13:44:38,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/da6aa2204f50,39625,1733406277651
2024-12-05T13:44:38,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T13:44:38,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event,
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:44:38,631 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:44:38,631 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=da6aa2204f50,39625,1733406277651 2024-12-05T13:44:38,634 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T13:44:38,636 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T13:44:38,690 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/hbase.id] with ID: 20de6e0f-2932-48fa-9e1b-78c45c9fed57 2024-12-05T13:44:38,690 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/.tmp/hbase.id 2024-12-05T13:44:38,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741826_1002 (size=42) 2024-12-05T13:44:38,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741826_1002 (size=42) 2024-12-05T13:44:38,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741826_1002 (size=42) 2024-12-05T13:44:38,706 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/.tmp/hbase.id]:[hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/hbase.id] 2024-12-05T13:44:38,750 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:44:38,754 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T13:44:38,771 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms. 
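The FSUtils entries above write the cluster ID file (hbase.id) to a temporary path and then move it into its target location. Below is a minimal sketch of that write-then-rename pattern using the generic Hadoop FileSystem API; it is not HBase's actual FSUtils code, and the class and method names are illustrative only.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      // Illustrative only: write the file to a temporary path first, then
      // rename it into place so readers never see a partially written file.
      public static void writeAtomically(Configuration conf, Path target, String clusterId)
          throws java.io.IOException {
        FileSystem fs = target.getFileSystem(conf);
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // HDFS rename is atomic, which is what makes the two-step write safe.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }

The same two-step shape is visible in the log: the ID is written under .tmp/hbase.id and only then moved to hbase.id.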
2024-12-05T13:44:38,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:38,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:38,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:38,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741827_1003 (size=196) 2024-12-05T13:44:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741827_1003 (size=196) 2024-12-05T13:44:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741827_1003 (size=196) 2024-12-05T13:44:38,818 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T13:44:38,819 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T13:44:38,824 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 
is added to blk_1073741828_1004 (size=1189) 2024-12-05T13:44:38,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741828_1004 (size=1189) 2024-12-05T13:44:38,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741828_1004 (size=1189) 2024-12-05T13:44:38,867 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store 2024-12-05T13:44:38,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741829_1005 (size=34) 2024-12-05T13:44:38,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741829_1005 (size=34) 2024-12-05T13:44:38,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741829_1005 (size=34) 2024-12-05T13:44:38,892 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
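The StoreHotnessProtector message above names the switch that enables it (hbase.region.store.parallel.put.limit, taken directly from the log). A minimal sketch, assuming a test-style setup, of flipping that switch through the standard Configuration API; the value 10 is an arbitrary example, not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
      public static Configuration withHotnessProtector() {
        Configuration conf = HBaseConfiguration.create();
        // Key copied from the log message above; any value > 0 enables the protector.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        return conf;
      }
    }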
2024-12-05T13:44:38,895 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:38,896 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:44:38,896 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:44:38,896 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:44:38,897 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:44:38,898 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:44:38,898 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:44:38,899 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733406278896Disabling compacts and flushes for region at 1733406278896Disabling writes for close at 1733406278898 (+2 ms)Writing region close event to WAL at 1733406278898Closed at 1733406278898 2024-12-05T13:44:38,901 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/.initializing 2024-12-05T13:44:38,901 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/WALs/da6aa2204f50,39625,1733406277651 2024-12-05T13:44:38,908 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:38,922 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C39625%2C1733406277651, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/WALs/da6aa2204f50,39625,1733406277651, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/oldWALs, maxLogs=10 2024-12-05T13:44:38,948 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/WALs/da6aa2204f50,39625,1733406277651/da6aa2204f50%2C39625%2C1733406277651.1733406278925, exclude list is [], retry=0 2024-12-05T13:44:38,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:38,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:38,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:38,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:38,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T13:44:39,007 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/WALs/da6aa2204f50,39625,1733406277651/da6aa2204f50%2C39625%2C1733406277651.1733406278925 2024-12-05T13:44:39,007 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:39,008 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:39,008 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:39,011 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,012 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T13:44:39,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:39,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T13:44:39,079 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:39,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T13:44:39,083 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:39,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T13:44:39,086 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:39,088 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,091 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,092 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,097 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,098 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,101 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T13:44:39,104 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:44:39,108 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:39,110 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63067713, jitterRate=-0.06021784245967865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:44:39,116 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733406279025Initializing all the Stores at 1733406279028 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406279028Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406279029 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406279029Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406279029Cleaning up temporary data from old regions at 1733406279098 (+69 ms)Region opened successfully at 1733406279116 (+18 ms) 2024-12-05T13:44:39,117 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T13:44:39,148 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53f1d554, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:44:39,173 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T13:44:39,182 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T13:44:39,182 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T13:44:39,184 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T13:44:39,185 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T13:44:39,189 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-05T13:44:39,190 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T13:44:39,210 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T13:44:39,218 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T13:44:39,258 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T13:44:39,261 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T13:44:39,264 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T13:44:39,274 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T13:44:39,276 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T13:44:39,279 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T13:44:39,288 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T13:44:39,289 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T13:44:39,299 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T13:44:39,319 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T13:44:39,329 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:44:39,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,346 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=da6aa2204f50,39625,1733406277651, sessionid=0x101a7065f890000, setting cluster-up flag (Was=false) 2024-12-05T13:44:39,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,400 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T13:44:39,404 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,39625,1733406277651 2024-12-05T13:44:39,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:39,449 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T13:44:39,451 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,39625,1733406277651 2024-12-05T13:44:39,459 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T13:44:39,526 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T13:44:39,536 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T13:44:39,543 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
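The StochasticLoadBalancer entry above echoes its effective settings (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000) and BaseLoadBalancer reports slop=0.2. A hedged sketch of overriding such knobs through Configuration; the property names below are assumptions inferred from the logged field names and should be verified against the HBase version actually in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuneBalancer() {
        Configuration conf = HBaseConfiguration.create();
        // Property names are assumptions matched to the logged values
        // (maxSteps, stepsPerRegion, maxRunningTime, slop); check them
        // against the running HBase version before relying on them.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setFloat("hbase.regions.slop", 0.2f);
        return conf;
      }
    }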
2024-12-05T13:44:39,548 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: da6aa2204f50,39625,1733406277651 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T13:44:39,554 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/da6aa2204f50:0, corePoolSize=10, maxPoolSize=10 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:44:39,556 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,558 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733406309558 2024-12-05T13:44:39,558 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(746): ClusterId : 20de6e0f-2932-48fa-9e1b-78c45c9fed57 2024-12-05T13:44:39,558 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(746): ClusterId : 20de6e0f-2932-48fa-9e1b-78c45c9fed57 2024-12-05T13:44:39,558 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(746): ClusterId : 20de6e0f-2932-48fa-9e1b-78c45c9fed57 2024-12-05T13:44:39,560 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T13:44:39,561 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T13:44:39,561 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T13:44:39,561 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T13:44:39,561 DEBUG [RS:1;da6aa2204f50:34361 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T13:44:39,562 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:44:39,562 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T13:44:39,564 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T13:44:39,565 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T13:44:39,565 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T13:44:39,565 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T13:44:39,567 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,567 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T13:44:39,566 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
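The table-descriptor dump above spells out the hbase:meta column families and their attributes. A minimal sketch of building a descriptor with the same attributes as the logged 'info' family using the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; the table name is hypothetical, and this is not the code path the master itself follows when bootstrapping meta.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      // Mirrors the 'info' family attributes printed in the log above:
      // VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192, ROW_INDEX_V1 encoding, ROWCOL bloom.
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example:meta_like"))  // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }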
2024-12-05T13:44:39,569 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T13:44:39,571 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T13:44:39,571 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T13:44:39,574 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T13:44:39,574 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T13:44:39,574 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T13:44:39,574 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T13:44:39,574 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T13:44:39,574 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T13:44:39,578 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T13:44:39,579 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T13:44:39,582 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406279581,5,FailOnTimeoutGroup] 2024-12-05T13:44:39,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741831_1007 (size=1321) 2024-12-05T13:44:39,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741831_1007 (size=1321) 2024-12-05T13:44:39,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741831_1007 (size=1321) 2024-12-05T13:44:39,588 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T13:44:39,588 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 2024-12-05T13:44:39,591 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406279582,5,FailOnTimeoutGroup] 2024-12-05T13:44:39,591 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,591 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T13:44:39,592 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T13:44:39,593 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T13:44:39,593 DEBUG [RS:2;da6aa2204f50:42407 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51fbce64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:44:39,593 DEBUG [RS:1;da6aa2204f50:34361 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45e832b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:44:39,593 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,593 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-05T13:44:39,594 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T13:44:39,595 DEBUG [RS:0;da6aa2204f50:43377 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ffa93e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:44:39,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741832_1008 (size=32) 2024-12-05T13:44:39,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741832_1008 (size=32) 2024-12-05T13:44:39,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741832_1008 (size=32) 2024-12-05T13:44:39,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:39,613 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;da6aa2204f50:34361 2024-12-05T13:44:39,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T13:44:39,616 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T13:44:39,616 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T13:44:39,617 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T13:44:39,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T13:44:39,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:39,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T13:44:39,619 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(2659): reportForDuty to master=da6aa2204f50,39625,1733406277651 with port=34361, startcode=1733406278491 2024-12-05T13:44:39,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T13:44:39,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:39,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T13:44:39,626 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T13:44:39,626 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:39,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T13:44:39,631 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T13:44:39,631 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:39,632 DEBUG [RS:1;da6aa2204f50:34361 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:44:39,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:39,633 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T13:44:39,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:39,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:39,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T13:44:39,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T13:44:39,640 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): 
No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T13:44:39,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T13:44:39,647 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;da6aa2204f50:42407 2024-12-05T13:44:39,647 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;da6aa2204f50:43377 2024-12-05T13:44:39,648 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T13:44:39,648 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T13:44:39,648 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T13:44:39,648 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T13:44:39,648 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T13:44:39,648 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T13:44:39,649 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(2659): reportForDuty to master=da6aa2204f50,39625,1733406277651 with port=43377, startcode=1733406278401 2024-12-05T13:44:39,649 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(2659): reportForDuty to master=da6aa2204f50,39625,1733406277651 with port=42407, startcode=1733406278528 2024-12-05T13:44:39,649 DEBUG [RS:0;da6aa2204f50:43377 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:44:39,649 DEBUG [RS:2;da6aa2204f50:42407 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:44:39,650 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:39,652 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65675147, jitterRate=-0.02136404812335968}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:44:39,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733406279611Initializing all the Stores at 1733406279613 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406279614 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY 
=> 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406279614Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406279614Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406279614Cleaning up temporary data from old regions at 1733406279638 (+24 ms)Region opened successfully at 1733406279655 (+17 ms) 2024-12-05T13:44:39,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T13:44:39,655 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T13:44:39,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T13:44:39,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T13:44:39,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T13:44:39,657 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T13:44:39,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733406279655Disabling compacts and flushes for region at 1733406279655Disabling writes for close at 1733406279656 (+1 ms)Writing region close event to WAL at 1733406279657 (+1 ms)Closed at 1733406279657 2024-12-05T13:44:39,666 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:44:39,666 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T13:44:39,673 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41777, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:44:39,673 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:44:39,673 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57235, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:44:39,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T13:44:39,679 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(363): Checking decommissioned status of RegionServer da6aa2204f50,34361,1733406278491 
2024-12-05T13:44:39,681 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:39,687 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:44:39,691 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T13:44:39,693 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(363): Checking decommissioned status of RegionServer da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,693 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(363): Checking decommissioned status of RegionServer da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,698 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 2024-12-05T13:44:39,699 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-12-05T13:44:39,699 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 2024-12-05T13:44:39,699 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-12-05T13:44:39,699 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T13:44:39,699 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T13:44:39,700 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 2024-12-05T13:44:39,701 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-12-05T13:44:39,701 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T13:44:39,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:44:39,737 DEBUG [RS:2;da6aa2204f50:42407 {}] zookeeper.ZKUtil(111): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,737 DEBUG 
[RS:0;da6aa2204f50:43377 {}] zookeeper.ZKUtil(111): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,738 DEBUG [RS:1;da6aa2204f50:34361 {}] zookeeper.ZKUtil(111): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/da6aa2204f50,34361,1733406278491 2024-12-05T13:44:39,738 WARN [RS:2;da6aa2204f50:42407 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:44:39,738 WARN [RS:0;da6aa2204f50:43377 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:44:39,738 WARN [RS:1;da6aa2204f50:34361 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:44:39,738 INFO [RS:1;da6aa2204f50:34361 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:39,738 INFO [RS:2;da6aa2204f50:42407 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:39,738 INFO [RS:0;da6aa2204f50:43377 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:39,739 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491 2024-12-05T13:44:39,739 DEBUG [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,739 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [da6aa2204f50,42407,1733406278528] 2024-12-05T13:44:39,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [da6aa2204f50,43377,1733406278401] 2024-12-05T13:44:39,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [da6aa2204f50,34361,1733406278491] 2024-12-05T13:44:39,764 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T13:44:39,764 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T13:44:39,764 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T13:44:39,776 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T13:44:39,776 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T13:44:39,776 INFO 
[RS:2;da6aa2204f50:42407 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T13:44:39,782 INFO [RS:0;da6aa2204f50:43377 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:44:39,782 INFO [RS:1;da6aa2204f50:34361 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:44:39,782 INFO [RS:2;da6aa2204f50:42407 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:44:39,782 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,782 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,782 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,783 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T13:44:39,783 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T13:44:39,784 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T13:44:39,789 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T13:44:39,789 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T13:44:39,789 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T13:44:39,791 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,791 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,791 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T13:44:39,791 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,791 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 
2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,792 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,792 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:1;da6aa2204f50:34361 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,793 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:44:39,793 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,793 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,793 DEBUG [RS:0;da6aa2204f50:43377 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,793 DEBUG [RS:2;da6aa2204f50:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,794 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,34361,1733406278491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:44:39,796 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,797 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,797 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,797 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,797 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,797 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,43377,1733406278401-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-05T13:44:39,801 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,801 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,801 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,802 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,802 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,802 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,42407,1733406278528-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:44:39,815 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T13:44:39,817 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,34361,1733406278491-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,817 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,817 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.Replication(171): da6aa2204f50,34361,1733406278491 started 2024-12-05T13:44:39,824 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T13:44:39,824 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T13:44:39,824 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,42407,1733406278528-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,824 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,43377,1733406278401-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,824 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,825 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.Replication(171): da6aa2204f50,43377,1733406278401 started 2024-12-05T13:44:39,825 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,825 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.Replication(171): da6aa2204f50,42407,1733406278528 started 2024-12-05T13:44:39,834 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T13:44:39,834 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1482): Serving as da6aa2204f50,34361,1733406278491, RpcServer on da6aa2204f50/172.17.0.2:34361, sessionid=0x101a7065f890002 2024-12-05T13:44:39,835 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T13:44:39,835 DEBUG [RS:1;da6aa2204f50:34361 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager da6aa2204f50,34361,1733406278491 2024-12-05T13:44:39,835 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,34361,1733406278491' 2024-12-05T13:44:39,835 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T13:44:39,836 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T13:44:39,837 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T13:44:39,837 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T13:44:39,837 DEBUG [RS:1;da6aa2204f50:34361 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager da6aa2204f50,34361,1733406278491 2024-12-05T13:44:39,837 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,34361,1733406278491' 2024-12-05T13:44:39,837 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T13:44:39,838 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T13:44:39,838 DEBUG [RS:1;da6aa2204f50:34361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T13:44:39,838 INFO [RS:1;da6aa2204f50:34361 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T13:44:39,838 INFO [RS:1;da6aa2204f50:34361 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T13:44:39,842 WARN [da6aa2204f50:39625 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T13:44:39,847 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:39,847 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T13:44:39,847 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1482): Serving as da6aa2204f50,42407,1733406278528, RpcServer on da6aa2204f50/172.17.0.2:42407, sessionid=0x101a7065f890003 2024-12-05T13:44:39,847 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1482): Serving as da6aa2204f50,43377,1733406278401, RpcServer on da6aa2204f50/172.17.0.2:43377, sessionid=0x101a7065f890001 2024-12-05T13:44:39,847 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T13:44:39,847 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T13:44:39,847 DEBUG [RS:0;da6aa2204f50:43377 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,847 DEBUG [RS:2;da6aa2204f50:42407 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,848 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,43377,1733406278401' 2024-12-05T13:44:39,848 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,42407,1733406278528' 2024-12-05T13:44:39,848 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T13:44:39,848 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T13:44:39,848 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T13:44:39,849 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T13:44:39,849 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T13:44:39,849 DEBUG [RS:2;da6aa2204f50:42407 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager da6aa2204f50,42407,1733406278528 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T13:44:39,849 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,42407,1733406278528' 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T13:44:39,849 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager da6aa2204f50,43377,1733406278401 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,43377,1733406278401' 2024-12-05T13:44:39,849 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T13:44:39,850 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T13:44:39,850 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T13:44:39,850 DEBUG [RS:2;da6aa2204f50:42407 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T13:44:39,850 DEBUG [RS:0;da6aa2204f50:43377 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T13:44:39,850 INFO [RS:2;da6aa2204f50:42407 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T13:44:39,850 INFO [RS:0;da6aa2204f50:43377 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T13:44:39,851 INFO [RS:2;da6aa2204f50:42407 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T13:44:39,851 INFO [RS:0;da6aa2204f50:43377 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T13:44:39,943 INFO [RS:1;da6aa2204f50:34361 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:39,946 INFO [RS:1;da6aa2204f50:34361 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C34361%2C1733406278491, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs, maxLogs=32 2024-12-05T13:44:39,951 INFO [RS:0;da6aa2204f50:43377 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:39,951 INFO [RS:2;da6aa2204f50:42407 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:39,955 INFO [RS:0;da6aa2204f50:43377 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C43377%2C1733406278401, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,43377,1733406278401, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs, maxLogs=32 2024-12-05T13:44:39,955 INFO [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C42407%2C1733406278528, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs, maxLogs=32 2024-12-05T13:44:39,967 DEBUG [RS:1;da6aa2204f50:34361 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491/da6aa2204f50%2C34361%2C1733406278491.1733406279949, exclude list is [], retry=0 2024-12-05T13:44:39,970 DEBUG [RS:2;da6aa2204f50:42407 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528/da6aa2204f50%2C42407%2C1733406278528.1733406279958, exclude 
list is [], retry=0 2024-12-05T13:44:39,971 DEBUG [RS:0;da6aa2204f50:43377 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,43377,1733406278401/da6aa2204f50%2C43377%2C1733406278401.1733406279958, exclude list is [], retry=0 2024-12-05T13:44:39,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:39,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:39,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:39,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:39,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:39,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:39,988 INFO [RS:1;da6aa2204f50:34361 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491/da6aa2204f50%2C34361%2C1733406278491.1733406279949 2024-12-05T13:44:39,989 DEBUG 
[RS:1;da6aa2204f50:34361 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:39,991 INFO [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528/da6aa2204f50%2C42407%2C1733406278528.1733406279958 2024-12-05T13:44:39,993 DEBUG [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:39,994 INFO [RS:0;da6aa2204f50:43377 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,43377,1733406278401/da6aa2204f50%2C43377%2C1733406278401.1733406279958 2024-12-05T13:44:39,996 DEBUG [RS:0;da6aa2204f50:43377 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:40,098 DEBUG [da6aa2204f50:39625 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T13:44:40,107 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(204): Hosts are {da6aa2204f50=0} racks are {/default-rack=0} 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T13:44:40,113 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T13:44:40,113 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T13:44:40,113 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T13:44:40,113 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T13:44:40,120 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:40,124 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,34361,1733406278491, state=OPENING 2024-12-05T13:44:40,166 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T13:44:40,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:40,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:40,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:40,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:44:40,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,180 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:44:40,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,34361,1733406278491}] 2024-12-05T13:44:40,362 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:44:40,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42861, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:44:40,375 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T13:44:40,376 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:40,376 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T13:44:40,379 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C34361%2C1733406278491.meta, suffix=.meta, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs, maxLogs=32 2024-12-05T13:44:40,393 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, exclude 
list is [], retry=0 2024-12-05T13:44:40,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:40,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:40,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:40,400 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta 2024-12-05T13:44:40,401 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:40,401 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:40,402 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T13:44:40,404 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T13:44:40,408 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
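The RegionCoprocessorHost record just above shows org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint being loaded from the HTD of hbase:meta at system priority (536870911). For a user table the same endpoint would normally be declared on the table descriptor. A minimal sketch using the standard HBase 2+ client builders; the class name and the table name `demo_table` are illustrative, not part of this test output:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorTableSketch {
  // Builds a descriptor for a hypothetical user table carrying the same
  // endpoint the log shows being loaded from the HTD of hbase:meta.
  public static TableDescriptor descriptor() throws IOException {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```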
2024-12-05T13:44:40,411 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T13:44:40,412 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:40,412 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T13:44:40,412 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T13:44:40,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T13:44:40,416 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T13:44:40,416 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:40,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:40,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T13:44:40,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T13:44:40,419 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:40,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:40,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T13:44:40,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T13:44:40,423 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:40,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:40,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T13:44:40,425 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T13:44:40,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:40,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
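Each HStore opened above prints its effective CompactionConfiguration: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 with jitter 0.5. Those numbers come from stock configuration keys; a minimal sketch that sets the same values explicitly (all keys are standard HBase properties, the class name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what the CompactionConfiguration(183) lines above report.
    conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
    return conf;
  }
}
```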
2024-12-05T13:44:40,427 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T13:44:40,428 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:40,431 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:40,433 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T13:44:40,433 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T13:44:40,434 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T13:44:40,436 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T13:44:40,438 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70448607, jitterRate=0.04976604878902435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:44:40,438 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T13:44:40,439 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733406280412Writing region info on filesystem at 1733406280413 (+1 ms)Initializing all the Stores at 1733406280414 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406280414Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406280415 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406280415Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406280415Cleaning up temporary data from old regions at 1733406280433 (+18 ms)Running coprocessor post-open hooks at 1733406280438 (+5 ms)Region opened successfully at 1733406280439 (+1 ms) 2024-12-05T13:44:40,445 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733406280351 2024-12-05T13:44:40,455 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T13:44:40,456 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T13:44:40,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:40,459 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,34361,1733406278491, state=OPEN 2024-12-05T13:44:40,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:40,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:40,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:40,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:40,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:40,535 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:40,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T13:44:40,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,34361,1733406278491 in 352 msec 2024-12-05T13:44:40,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T13:44:40,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 869 msec 2024-12-05T13:44:40,551 DEBUG [PEWorker-4 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:44:40,551 INFO [PEWorker-4 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T13:44:40,587 DEBUG [PEWorker-4 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:40,588 DEBUG [PEWorker-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1] 2024-12-05T13:44:40,609 DEBUG [PEWorker-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:44:40,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:44:40,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1530 sec 2024-12-05T13:44:40,643 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733406280643, completionTime=-1 2024-12-05T13:44:40,646 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T13:44:40,647 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
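The registry exchange above (ClusterIdFetcher, ConnectionRegistryService, "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,...]") has a simple client-side counterpart. A minimal sketch of locating hbase:meta through a RegionLocator, assuming the ZooKeeper quorum and client port shown in the ZKWatcher lines (127.0.0.1:53425); the class name is illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and port taken from the ZKWatcher records above; adjust for a real cluster.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "53425");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Client-side equivalent of the "fetched meta region location" the log reports.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}
```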
2024-12-05T13:44:40,672 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T13:44:40,672 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733406340672 2024-12-05T13:44:40,672 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733406400672 2024-12-05T13:44:40,672 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-05T13:44:40,674 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T13:44:40,681 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,682 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,682 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,684 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-da6aa2204f50:39625, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,684 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,685 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,691 DEBUG [master/da6aa2204f50:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T13:44:40,712 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.080sec 2024-12-05T13:44:40,713 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T13:44:40,715 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T13:44:40,716 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T13:44:40,717 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
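The ChoreService(168) records above show the master enabling its periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, and so on) with their periods. A custom periodic task is registered through the same machinery; a minimal sketch, assuming the ScheduledChore and ChoreService constructors of recent HBase releases (name, stopper, period in milliseconds), with illustrative names:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Simple stopper so the chore can be cancelled.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // A periodic task registered the same way the master registers its chores above.
    ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("periodic work");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore);
    // ... later: service.shutdown();
  }
}
```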
2024-12-05T13:44:40,717 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T13:44:40,718 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:44:40,718 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T13:44:40,723 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T13:44:40,724 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T13:44:40,725 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,39625,1733406277651-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:40,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc063d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:44:40,770 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request da6aa2204f50,39625,-1 for getting cluster id 2024-12-05T13:44:40,772 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T13:44:40,781 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '20de6e0f-2932-48fa-9e1b-78c45c9fed57' 2024-12-05T13:44:40,784 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T13:44:40,784 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "20de6e0f-2932-48fa-9e1b-78c45c9fed57" 2024-12-05T13:44:40,785 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cbff4ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:44:40,785 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [da6aa2204f50,39625,-1] 2024-12-05T13:44:40,788 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T13:44:40,790 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:44:40,791 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T13:44:40,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93f063a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:44:40,794 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:40,801 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1] 2024-12-05T13:44:40,802 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:44:40,804 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:44:40,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=da6aa2204f50,39625,1733406277651 2024-12-05T13:44:40,821 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:34105/hbase 2024-12-05T13:44:40,834 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testNameConflictWhenSplit0 Thread=368, OpenFileDescriptor=611, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=11, AvailableMemoryMB=8817 2024-12-05T13:44:40,851 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:40,855 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:40,856 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:40,860 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-59693590, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-59693590, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:40,874 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-59693590/hregion-59693590.1733406280861, exclude list is [], retry=0 2024-12-05T13:44:40,879 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:40,879 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:40,880 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:40,883 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-59693590/hregion-59693590.1733406280861 2024-12-05T13:44:40,884 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:40,884 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => b28f6081af0e4a4dafa7c257eb005c17, NAME => 'testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:40,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741838_1014 (size=64) 2024-12-05T13:44:40,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741838_1014 (size=64) 2024-12-05T13:44:40,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741838_1014 (size=64) 2024-12-05T13:44:40,900 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:40,902 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,904 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b28f6081af0e4a4dafa7c257eb005c17 columnFamilyName a 2024-12-05T13:44:40,904 DEBUG [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:40,905 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] regionserver.HStore(327): Store=b28f6081af0e4a4dafa7c257eb005c17/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:40,905 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,906 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,907 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,908 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,908 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,911 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:40,914 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:40,915 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened b28f6081af0e4a4dafa7c257eb005c17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72722262, jitterRate=0.08364614844322205}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:44:40,916 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for b28f6081af0e4a4dafa7c257eb005c17: Writing region info on filesystem at 1733406280900Initializing all the Stores at 1733406280901 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406280901Cleaning up temporary data from old regions at 1733406280908 (+7 ms)Region opened successfully at 1733406280916 (+8 ms) 2024-12-05T13:44:40,916 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing b28f6081af0e4a4dafa7c257eb005c17, disabling compactions & flushes 2024-12-05T13:44:40,916 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. 2024-12-05T13:44:40,917 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. 2024-12-05T13:44:40,917 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. after waiting 0 ms 2024-12-05T13:44:40,917 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. 2024-12-05T13:44:40,917 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. 
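The HRegion(7572) record above creates region b28f6081af0e4a4dafa7c257eb005c17 for table testReplayEditsWrittenIntoWAL with a single column family 'a' (VERSIONS=1, BLOOMFILTER=ROW, no compression, no data block encoding). A minimal sketch of the equivalent descriptor built through the client API; the class name is illustrative, and the actual table in this run is created by the test harness rather than by this snippet:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplayTableSketch {
  // Mirrors the descriptor printed above: one family 'a', 1 version, ROW bloom filter.
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}
```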
2024-12-05T13:44:40,917 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for b28f6081af0e4a4dafa7c257eb005c17: Waiting for close lock at 1733406280916Disabling compacts and flushes for region at 1733406280916Disabling writes for close at 1733406280917 (+1 ms)Writing region close event to WAL at 1733406280917Closed at 1733406280917 2024-12-05T13:44:40,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741837_1013 (size=93) 2024-12-05T13:44:40,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741837_1013 (size=93) 2024-12-05T13:44:40,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741837_1013 (size=93) 2024-12-05T13:44:40,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:40,928 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-59693590:(num 1733406280861) 2024-12-05T13:44:40,930 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-05T13:44:40,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741839_1015 (size=276) 2024-12-05T13:44:40,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741839_1015 (size=276) 2024-12-05T13:44:40,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741839_1015 (size=276) 2024-12-05T13:44:40,949 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-05T13:44:40,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741840_1016 (size=230) 2024-12-05T13:44:40,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741840_1016 (size=230) 2024-12-05T13:44:40,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741840_1016 (size=230) 2024-12-05T13:44:40,979 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1, size=276 (276bytes) 2024-12-05T13:44:40,979 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-05T13:44:40,979 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-05T13:44:40,979 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1 2024-12-05T13:44:40,984 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1 after 3ms 2024-12-05T13:44:40,989 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:40,990 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1 took 13ms 2024-12-05T13:44:40,994 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1 so closing down 2024-12-05T13:44:40,997 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-05T13:44:40,999 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:40,999 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp 2024-12-05T13:44:41,000 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:41,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741841_1017 (size=276) 2024-12-05T13:44:41,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741841_1017 (size=276) 2024-12-05T13:44:41,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741841_1017 (size=276) 2024-12-05T13:44:41,012 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:41,014 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002 2024-12-05T13:44:41,019 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1, size=276, length=276, corrupted=false, cancelled=false 2024-12-05T13:44:41,019 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1, journal: Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1, size=276 (276bytes) at 1733406280979Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1 so closing down at 1733406280994 (+15 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp at 1733406280999 (+5 ms)3 split writer threads finished at 1733406281000 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733406281012 (+12 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002 at 1733406281014 (+2 ms)Processed 2 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-1, size=276, length=276, corrupted=false, cancelled=false at 1733406281019 (+5 ms) 2024-12-05T13:44:41,033 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2, size=230 (230bytes) 2024-12-05T13:44:41,033 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2 2024-12-05T13:44:41,034 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2 after 1ms 2024-12-05T13:44:41,038 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,039 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2 took 6ms 2024-12-05T13:44:41,041 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2 so closing down 2024-12-05T13:44:41,041 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:41,044 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-05T13:44:41,045 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002-wal-2.temp 2024-12-05T13:44:41,046 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:41,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741842_1018 (size=230) 2024-12-05T13:44:41,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741842_1018 (size=230) 2024-12-05T13:44:41,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741842_1018 (size=230) 2024-12-05T13:44:41,054 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:41,058 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,061 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002-wal-2.temp, length=230 2024-12-05T13:44:41,062 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2, size=230, length=230, corrupted=false, cancelled=false 2024-12-05T13:44:41,062 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2, journal: Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2, size=230 (230bytes) at 1733406281033Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2 so closing down at 1733406281041 (+8 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002-wal-2.temp at 1733406281045 (+4 ms)3 split writer threads finished at 1733406281046 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733406281054 (+8 ms)Processed 1 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal-2, size=230, length=230, corrupted=false, cancelled=false at 1733406281062 (+8 ms) 2024-12-05T13:44:41,063 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:41,065 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:41,078 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal.1733406281066, exclude list is [], retry=0 2024-12-05T13:44:41,083 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:41,084 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:41,084 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:41,087 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal.1733406281066 2024-12-05T13:44:41,087 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:41,087 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => b28f6081af0e4a4dafa7c257eb005c17, NAME => 'testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:41,087 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:41,088 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,088 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,090 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,091 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b28f6081af0e4a4dafa7c257eb005c17 columnFamilyName a 2024-12-05T13:44:41,091 DEBUG [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,092 INFO [StoreOpener-b28f6081af0e4a4dafa7c257eb005c17-1 {}] regionserver.HStore(327): Store=b28f6081af0e4a4dafa7c257eb005c17/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,092 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,094 DEBUG [Time-limited test 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,096 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,098 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002 2024-12-05T13:44:41,102 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,108 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002 2024-12-05T13:44:41,113 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b28f6081af0e4a4dafa7c257eb005c17 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-05T13:44:41,170 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/.tmp/a/52f9a7941a8c45c7b84496e53bad4f1e is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733406280928/Put/seqid=0 2024-12-05T13:44:41,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741844_1020 (size=5170) 2024-12-05T13:44:41,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741844_1020 (size=5170) 2024-12-05T13:44:41,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741844_1020 (size=5170) 2024-12-05T13:44:41,186 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/.tmp/a/52f9a7941a8c45c7b84496e53bad4f1e 2024-12-05T13:44:41,227 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/.tmp/a/52f9a7941a8c45c7b84496e53bad4f1e as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/a/52f9a7941a8c45c7b84496e53bad4f1e 2024-12-05T13:44:41,237 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/a/52f9a7941a8c45c7b84496e53bad4f1e, entries=2, sequenceid=2, filesize=5.0 K 2024-12-05T13:44:41,243 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for b28f6081af0e4a4dafa7c257eb005c17 in 131ms, sequenceid=2, compaction 
requested=false; wal=null 2024-12-05T13:44:41,245 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/0000000000000000002 2024-12-05T13:44:41,246 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,246 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,249 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for b28f6081af0e4a4dafa7c257eb005c17 2024-12-05T13:44:41,253 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/b28f6081af0e4a4dafa7c257eb005c17/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-05T13:44:41,255 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened b28f6081af0e4a4dafa7c257eb005c17; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64898944, jitterRate=-0.03293037414550781}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:44:41,256 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for b28f6081af0e4a4dafa7c257eb005c17: Writing region info on filesystem at 1733406281088Initializing all the Stores at 1733406281089 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281090 (+1 ms)Obtaining lock to block concurrent updates at 1733406281113 (+23 ms)Preparing flush snapshotting stores in b28f6081af0e4a4dafa7c257eb005c17 at 1733406281113Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733406281117 (+4 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733406280852.b28f6081af0e4a4dafa7c257eb005c17. 
at 1733406281117Flushing b28f6081af0e4a4dafa7c257eb005c17/a: creating writer at 1733406281119 (+2 ms)Flushing b28f6081af0e4a4dafa7c257eb005c17/a: appending metadata at 1733406281157 (+38 ms)Flushing b28f6081af0e4a4dafa7c257eb005c17/a: closing flushed file at 1733406281160 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e384f8d: reopening flushed file at 1733406281225 (+65 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for b28f6081af0e4a4dafa7c257eb005c17 in 131ms, sequenceid=2, compaction requested=false; wal=null at 1733406281243 (+18 ms)Cleaning up temporary data from old regions at 1733406281246 (+3 ms)Region opened successfully at 1733406281256 (+10 ms) 2024-12-05T13:44:41,281 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testNameConflictWhenSplit0 Thread=379 (was 368) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46464 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:60742 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:35886 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:60896 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34105/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46306 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=703 (was 611) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 410), ProcessCount=11 (was 11), AvailableMemoryMB=8804 (was 8817) 2024-12-05T13:44:41,291 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testNameConflictWhenSplit1 Thread=379, OpenFileDescriptor=703, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=11, AvailableMemoryMB=8804 2024-12-05T13:44:41,305 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:41,306 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:41,307 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:41,310 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-86668534, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-86668534, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:41,321 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-86668534/hregion-86668534.1733406281311, exclude list is [], retry=0 2024-12-05T13:44:41,325 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:41,325 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:41,325 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:41,328 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-86668534/hregion-86668534.1733406281311 2024-12-05T13:44:41,328 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:41,329 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 8f0dcf6115a5171f2b92264dde0f126f, NAME => 'testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741846_1022 (size=64) 2024-12-05T13:44:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43927 is added to blk_1073741846_1022 (size=64) 2024-12-05T13:44:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741846_1022 (size=64) 2024-12-05T13:44:41,341 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:41,347 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,349 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0dcf6115a5171f2b92264dde0f126f columnFamilyName a 2024-12-05T13:44:41,349 DEBUG [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,350 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] regionserver.HStore(327): Store=8f0dcf6115a5171f2b92264dde0f126f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,350 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,351 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,352 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,353 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,353 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,355 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,359 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:41,360 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 
8f0dcf6115a5171f2b92264dde0f126f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63197710, jitterRate=-0.058280736207962036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:44:41,360 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8f0dcf6115a5171f2b92264dde0f126f: Writing region info on filesystem at 1733406281342Initializing all the Stores at 1733406281343 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281343Cleaning up temporary data from old regions at 1733406281353 (+10 ms)Region opened successfully at 1733406281360 (+7 ms) 2024-12-05T13:44:41,360 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8f0dcf6115a5171f2b92264dde0f126f, disabling compactions & flushes 2024-12-05T13:44:41,360 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. 2024-12-05T13:44:41,360 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. 2024-12-05T13:44:41,360 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. after waiting 0 ms 2024-12-05T13:44:41,360 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. 2024-12-05T13:44:41,360 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. 
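[Editor's illustrative sketch, not part of the captured log or the test source. The column family printed in the open journal above ({NAME => 'a', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)', no compression or encoding}) corresponds to a descriptor that could be built with the public HBase client API roughly as follows; the class name and main method are hypothetical scaffolding.]

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDescriptorSketch {
      public static void main(String[] args) {
        // Column family 'a' mirroring the attributes printed in the region open journal:
        // one version, ROW bloom filter, 64 KB blocks, no compression or block encoding.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
        System.out.println(td);
      }
    }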
2024-12-05T13:44:41,361 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8f0dcf6115a5171f2b92264dde0f126f: Waiting for close lock at 1733406281360Disabling compacts and flushes for region at 1733406281360Disabling writes for close at 1733406281360Writing region close event to WAL at 1733406281360Closed at 1733406281360 2024-12-05T13:44:41,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741845_1021 (size=93) 2024-12-05T13:44:41,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741845_1021 (size=93) 2024-12-05T13:44:41,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741845_1021 (size=93) 2024-12-05T13:44:41,367 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:41,368 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-86668534:(num 1733406281311) 2024-12-05T13:44:41,369 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-05T13:44:41,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741847_1023 (size=276) 2024-12-05T13:44:41,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741847_1023 (size=276) 2024-12-05T13:44:41,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741847_1023 (size=276) 2024-12-05T13:44:41,384 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-05T13:44:41,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741848_1024 (size=230) 2024-12-05T13:44:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741848_1024 (size=230) 2024-12-05T13:44:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741848_1024 (size=230) 2024-12-05T13:44:41,409 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2, size=230 (230bytes) 2024-12-05T13:44:41,409 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2 2024-12-05T13:44:41,410 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2 after 1ms 2024-12-05T13:44:41,414 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,414 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2 took 5ms 
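[Editor's illustrative sketch, not part of the captured log or the test source. The split that follows writes recovered edits under the region directory using file names derived from the first sequence id in the file, zero-padded as seen above (0000000000000000002). A minimal, hypothetical sketch of scanning that directory with the standard Hadoop FileSystem API and picking the highest sequence id; the HDFS URI and region path are copied from the log lines, the filtering logic is illustrative only and is not HBase's own implementation.]

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsSketch {
      public static void main(String[] args) throws Exception {
        // Region directory taken from the log above (assumes the test HDFS is still reachable).
        Path regionDir = new Path(
            "hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/"
            + "8f0dcf6115a5171f2b92264dde0f126f");
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34105"), new Configuration());
        Path editsDir = new Path(regionDir, "recovered.edits");
        long maxSeqId = -1L;
        if (fs.exists(editsDir)) {
          for (FileStatus st : fs.listStatus(editsDir)) {
            String name = st.getPath().getName();  // e.g. "0000000000000000002"
            if (name.matches("\\d{19}")) {         // ignore *.temp and *.seqid entries
              maxSeqId = Math.max(maxSeqId, Long.parseLong(name));
            }
          }
        }
        System.out.println("highest recovered-edits sequence id: " + maxSeqId);
      }
    }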
2024-12-05T13:44:41,416 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2 so closing down 2024-12-05T13:44:41,416 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:41,419 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-05T13:44:41,420 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp 2024-12-05T13:44:41,421 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:41,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741849_1025 (size=230) 2024-12-05T13:44:41,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741849_1025 (size=230) 2024-12-05T13:44:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741849_1025 (size=230) 2024-12-05T13:44:41,430 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:41,432 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 2024-12-05T13:44:41,432 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 18 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2, size=230, length=230, corrupted=false, cancelled=false 2024-12-05T13:44:41,432 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2, journal: Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2, size=230 (230bytes) at 1733406281409Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2 so closing down at 1733406281416 (+7 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp at 1733406281420 (+4 ms)3 split writer threads finished at 1733406281421 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733406281430 (+9 ms)Rename recovered edits 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 at 1733406281432 (+2 ms)Processed 1 edits across 1 Regions in 18 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-2, size=230, length=230, corrupted=false, cancelled=false at 1733406281432 2024-12-05T13:44:41,446 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1, size=276 (276bytes) 2024-12-05T13:44:41,446 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1 2024-12-05T13:44:41,447 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1 after 1ms 2024-12-05T13:44:41,450 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,450 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1 took 4ms 2024-12-05T13:44:41,453 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1 so closing down 2024-12-05T13:44:41,453 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:41,455 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-05T13:44:41,456 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp 2024-12-05T13:44:41,457 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:41,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741850_1026 (size=276) 2024-12-05T13:44:41,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741850_1026 (size=276) 2024-12-05T13:44:41,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741850_1026 (size=276) 2024-12-05T13:44:41,465 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:41,470 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,472 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002, length=230 2024-12-05T13:44:41,475 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 2024-12-05T13:44:41,475 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1, size=276, length=276, corrupted=false, cancelled=false 2024-12-05T13:44:41,475 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1, journal: Splitting hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1, size=276 (276bytes) at 1733406281446Finishing writing output for hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1 so closing down at 1733406281453 (+7 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp at 1733406281456 (+3 ms)3 split writer threads finished at 1733406281457 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733406281465 (+8 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 at 1733406281475 (+10 ms)Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal-1, size=276, length=276, corrupted=false, cancelled=false at 1733406281475 2024-12-05T13:44:41,475 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:41,478 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:41,493 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal.1733406281478, exclude list is [], retry=0 2024-12-05T13:44:41,496 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:41,497 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:41,497 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:41,499 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal.1733406281478 2024-12-05T13:44:41,500 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:41,500 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 8f0dcf6115a5171f2b92264dde0f126f, NAME => 'testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:41,500 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:41,500 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,500 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,502 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,504 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0dcf6115a5171f2b92264dde0f126f columnFamilyName a 2024-12-05T13:44:41,504 DEBUG [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,505 INFO [StoreOpener-8f0dcf6115a5171f2b92264dde0f126f-1 {}] regionserver.HStore(327): Store=8f0dcf6115a5171f2b92264dde0f126f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,505 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,506 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,508 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,509 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 2024-12-05T13:44:41,512 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:41,514 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 2024-12-05T13:44:41,514 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8f0dcf6115a5171f2b92264dde0f126f 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-05T13:44:41,529 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/.tmp/a/713d38cd6cc14502a2ca4019f0e00630 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733406281368/Put/seqid=0 2024-12-05T13:44:41,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741852_1028 (size=5170) 2024-12-05T13:44:41,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741852_1028 (size=5170) 2024-12-05T13:44:41,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741852_1028 (size=5170) 2024-12-05T13:44:41,538 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/.tmp/a/713d38cd6cc14502a2ca4019f0e00630 2024-12-05T13:44:41,548 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/.tmp/a/713d38cd6cc14502a2ca4019f0e00630 as 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/a/713d38cd6cc14502a2ca4019f0e00630 2024-12-05T13:44:41,557 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/a/713d38cd6cc14502a2ca4019f0e00630, entries=2, sequenceid=2, filesize=5.0 K 2024-12-05T13:44:41,558 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 8f0dcf6115a5171f2b92264dde0f126f in 44ms, sequenceid=2, compaction requested=false; wal=null 2024-12-05T13:44:41,559 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/0000000000000000002 2024-12-05T13:44:41,560 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,560 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,563 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8f0dcf6115a5171f2b92264dde0f126f 2024-12-05T13:44:41,566 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/8f0dcf6115a5171f2b92264dde0f126f/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-05T13:44:41,567 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8f0dcf6115a5171f2b92264dde0f126f; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68861313, jitterRate=0.02611352503299713}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:44:41,567 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8f0dcf6115a5171f2b92264dde0f126f: Writing region info on filesystem at 1733406281500Initializing all the Stores at 1733406281502 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281502Obtaining lock to block concurrent updates at 1733406281514 (+12 ms)Preparing flush snapshotting stores in 8f0dcf6115a5171f2b92264dde0f126f at 1733406281514Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733406281514Flushing stores of testReplayEditsWrittenIntoWAL,,1733406281305.8f0dcf6115a5171f2b92264dde0f126f. 
at 1733406281514Flushing 8f0dcf6115a5171f2b92264dde0f126f/a: creating writer at 1733406281514Flushing 8f0dcf6115a5171f2b92264dde0f126f/a: appending metadata at 1733406281528 (+14 ms)Flushing 8f0dcf6115a5171f2b92264dde0f126f/a: closing flushed file at 1733406281528Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e6ab8bf: reopening flushed file at 1733406281547 (+19 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 8f0dcf6115a5171f2b92264dde0f126f in 44ms, sequenceid=2, compaction requested=false; wal=null at 1733406281558 (+11 ms)Cleaning up temporary data from old regions at 1733406281560 (+2 ms)Region opened successfully at 1733406281567 (+7 ms) 2024-12-05T13:44:41,583 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testNameConflictWhenSplit1 Thread=389 (was 379) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:60742 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46536 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:35966 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46306 [Waiting for operation #20] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:60962 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741851_1027, 
type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 703) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 410), ProcessCount=11 (was 11), AvailableMemoryMB=8799 (was 8804) 2024-12-05T13:44:41,593 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsWrittenIntoWAL Thread=389, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=11, AvailableMemoryMB=8798 2024-12-05T13:44:41,606 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:41,608 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:41,609 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:41,612 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-79681847, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-79681847, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:41,623 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-79681847/hregion-79681847.1733406281613, exclude list is [], retry=0 2024-12-05T13:44:41,626 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:41,627 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:41,627 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:41,629 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-79681847/hregion-79681847.1733406281613 2024-12-05T13:44:41,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:41,629 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2cdce4a5bd15bc3efe602de253ae3080, NAME => 'testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:41,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741854_1030 (size=64) 2024-12-05T13:44:41,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741854_1030 (size=64) 2024-12-05T13:44:41,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741854_1030 (size=64) 2024-12-05T13:44:41,640 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:41,642 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,644 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName a 2024-12-05T13:44:41,644 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,644 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,645 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,646 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName b 2024-12-05T13:44:41,646 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,647 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,647 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,649 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName c 2024-12-05T13:44:41,649 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:41,649 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:41,650 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,651 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,651 DEBUG [Time-limited test 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,653 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,653 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,654 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:41,655 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:41,658 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:41,658 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2cdce4a5bd15bc3efe602de253ae3080; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62854655, jitterRate=-0.06339265406131744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:41,659 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2cdce4a5bd15bc3efe602de253ae3080: Writing region info on filesystem at 1733406281641Initializing all the Stores at 1733406281642 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281642Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281642Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406281642Cleaning up temporary data from old regions at 1733406281653 (+11 ms)Region opened successfully at 1733406281659 (+6 ms) 2024-12-05T13:44:41,659 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2cdce4a5bd15bc3efe602de253ae3080, disabling compactions & flushes 2024-12-05T13:44:41,659 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:41,659 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 
2024-12-05T13:44:41,659 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. after waiting 0 ms 2024-12-05T13:44:41,659 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:41,660 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:41,660 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2cdce4a5bd15bc3efe602de253ae3080: Waiting for close lock at 1733406281659Disabling compacts and flushes for region at 1733406281659Disabling writes for close at 1733406281659Writing region close event to WAL at 1733406281660 (+1 ms)Closed at 1733406281660 2024-12-05T13:44:41,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741853_1029 (size=93) 2024-12-05T13:44:41,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741853_1029 (size=93) 2024-12-05T13:44:41,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741853_1029 (size=93) 2024-12-05T13:44:41,667 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:41,667 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-79681847:(num 1733406281613) 2024-12-05T13:44:41,667 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:41,670 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:41,683 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, exclude list is [], retry=0 2024-12-05T13:44:41,686 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:41,686 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:41,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:41,689 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 2024-12-05T13:44:41,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:41,892 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, size=0 (0bytes) 2024-12-05T13:44:41,892 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 might be still open, length is 0 2024-12-05T13:44:41,892 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 2024-12-05T13:44:41,893 WARN [IPC Server handler 2 on default port 34105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-05T13:44:41,894 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 after 2ms 2024-12-05T13:44:42,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:36004 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:38521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36004 dst: /127.0.0.1:38521 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38521 remote=/127.0.0.1:36004]. Total timeout mills is 60000, 59030 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:42,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:60990 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:43927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60990 dst: /127.0.0.1:43927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:42,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46574 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46574 dst: /127.0.0.1:44137 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:42,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741855_1032 (size=200589) 2024-12-05T13:44:42,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741855_1032 (size=200589) 2024-12-05T13:44:42,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741855_1032 (size=200589) 2024-12-05T13:44:45,896 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 after 4004ms 2024-12-05T13:44:45,907 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:45,909 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 took 4017ms 2024-12-05T13:44:45,914 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733406281670.temp 2024-12-05T13:44:45,918 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp 2024-12-05T13:44:45,921 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-05T13:44:45,982 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T13:44:46,028 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670; continuing. 
2024-12-05T13:44:46,028 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 so closing down 2024-12-05T13:44:46,028 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:46,028 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:46,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741856_1033 (size=200597) 2024-12-05T13:44:46,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741856_1033 (size=200597) 2024-12-05T13:44:46,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741856_1033 (size=200597) 2024-12-05T13:44:46,033 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp (wrote 3002 edits, skipped 0 edits in 50 ms) 2024-12-05T13:44:46,035 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 2024-12-05T13:44:46,036 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 125 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, size=0, length=0, corrupted=false, cancelled=false 2024-12-05T13:44:46,036 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, journal: Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, size=0 (0bytes) at 1733406281892Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp at 1733406285918 (+4026 ms)Split 1024 edits, skipped 0 edits. at 1733406285980 (+62 ms)Split 2048 edits, skipped 0 edits. 
at 1733406286004 (+24 ms)Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 so closing down at 1733406286028 (+24 ms)3 split writer threads finished at 1733406286028Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp (wrote 3002 edits, skipped 0 edits in 50 ms) at 1733406286033 (+5 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000000001-wal.1733406281670.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 at 1733406286035 (+2 ms)Processed 3002 edits across 1 Regions in 125 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670, size=0, length=0, corrupted=false, cancelled=false at 1733406286036 (+1 ms) 2024-12-05T13:44:46,038 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406281670 2024-12-05T13:44:46,040 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 2024-12-05T13:44:46,040 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:46,042 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:46,055 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406286043, exclude list is [], retry=0 2024-12-05T13:44:46,059 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:46,060 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:46,060 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:46,063 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406286043 2024-12-05T13:44:46,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:46,063 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:46,065 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,067 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName a 2024-12-05T13:44:46,067 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,068 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,068 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,070 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName b 2024-12-05T13:44:46,070 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,071 INFO 
[StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,071 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,073 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdce4a5bd15bc3efe602de253ae3080 columnFamilyName c 2024-12-05T13:44:46,073 DEBUG [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,074 INFO [StoreOpener-2cdce4a5bd15bc3efe602de253ae3080-1 {}] regionserver.HStore(327): Store=2cdce4a5bd15bc3efe602de253ae3080/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,074 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,075 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,077 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,078 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 2024-12-05T13:44:46,081 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:46,116 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-05T13:44:46,422 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2cdce4a5bd15bc3efe602de253ae3080 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-05T13:44:46,453 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/6038d6518e0b4f6b846577e4a3816e28 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733406281696/Put/seqid=0 2024-12-05T13:44:46,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741858_1035 (size=50463) 2024-12-05T13:44:46,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741858_1035 (size=50463) 2024-12-05T13:44:46,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741858_1035 (size=50463) 2024-12-05T13:44:46,462 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/6038d6518e0b4f6b846577e4a3816e28 2024-12-05T13:44:46,470 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/6038d6518e0b4f6b846577e4a3816e28 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/a/6038d6518e0b4f6b846577e4a3816e28 2024-12-05T13:44:46,476 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/a/6038d6518e0b4f6b846577e4a3816e28, entries=754, sequenceid=754, filesize=49.3 K 2024-12-05T13:44:46,476 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 2cdce4a5bd15bc3efe602de253ae3080 in 54ms, sequenceid=754, compaction requested=false; wal=null 2024-12-05T13:44:46,501 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-05T13:44:46,502 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2cdce4a5bd15bc3efe602de253ae3080 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-05T13:44:46,512 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/7d0fa9426a214dc896f53bc472e9a4ae is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733406281740/Put/seqid=0 2024-12-05T13:44:46,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741859_1036 (size=20072) 2024-12-05T13:44:46,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741859_1036 (size=20072) 2024-12-05T13:44:46,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741859_1036 (size=20072) 2024-12-05T13:44:46,526 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/7d0fa9426a214dc896f53bc472e9a4ae 2024-12-05T13:44:46,550 DEBUG [Time-limited test {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/48983eaa1d0c4a3e86072d651534741c is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733406281765/Put/seqid=0 2024-12-05T13:44:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741860_1037 (size=35835) 2024-12-05T13:44:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741860_1037 (size=35835) 2024-12-05T13:44:46,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741860_1037 (size=35835) 2024-12-05T13:44:46,559 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/48983eaa1d0c4a3e86072d651534741c 2024-12-05T13:44:46,567 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/a/7d0fa9426a214dc896f53bc472e9a4ae as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/a/7d0fa9426a214dc896f53bc472e9a4ae 2024-12-05T13:44:46,575 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/a/7d0fa9426a214dc896f53bc472e9a4ae, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-05T13:44:46,577 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/48983eaa1d0c4a3e86072d651534741c as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/b/48983eaa1d0c4a3e86072d651534741c 2024-12-05T13:44:46,584 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/b/48983eaa1d0c4a3e86072d651534741c, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-05T13:44:46,584 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 2cdce4a5bd15bc3efe602de253ae3080 in 83ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-05T13:44:46,594 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-05T13:44:46,594 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2cdce4a5bd15bc3efe602de253ae3080 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-05T13:44:46,601 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/330f50755f1a40dc9864ac0ef5cdf96e is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733406281782/Put/seqid=0 2024-12-05T13:44:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741861_1038 (size=35082) 
2024-12-05T13:44:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741861_1038 (size=35082) 2024-12-05T13:44:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741861_1038 (size=35082) 2024-12-05T13:44:46,610 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/330f50755f1a40dc9864ac0ef5cdf96e 2024-12-05T13:44:46,633 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/bd2e8a631bba441aa5d0f2442c4c3598 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733406281813/Put/seqid=0 2024-12-05T13:44:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741862_1039 (size=20825) 2024-12-05T13:44:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741862_1039 (size=20825) 2024-12-05T13:44:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741862_1039 (size=20825) 2024-12-05T13:44:46,643 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/bd2e8a631bba441aa5d0f2442c4c3598 2024-12-05T13:44:46,651 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/b/330f50755f1a40dc9864ac0ef5cdf96e as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/b/330f50755f1a40dc9864ac0ef5cdf96e 2024-12-05T13:44:46,658 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/b/330f50755f1a40dc9864ac0ef5cdf96e, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-05T13:44:46,660 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/bd2e8a631bba441aa5d0f2442c4c3598 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/c/bd2e8a631bba441aa5d0f2442c4c3598 2024-12-05T13:44:46,667 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/c/bd2e8a631bba441aa5d0f2442c4c3598, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-05T13:44:46,667 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 2cdce4a5bd15bc3efe602de253ae3080 in 73ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-05T13:44:46,677 WARN [Time-limited test {}] regionserver.HRegion(5722): No family 
for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1733406281857/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:46,680 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 2024-12-05T13:44:46,681 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-05T13:44:46,681 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2cdce4a5bd15bc3efe602de253ae3080 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-05T13:44:46,690 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/7d862aed519e4d6a8287bc3d3af80527 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733406281822/Put/seqid=0 2024-12-05T13:44:46,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741863_1040 (size=50301) 2024-12-05T13:44:46,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741863_1040 (size=50301) 2024-12-05T13:44:46,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741863_1040 (size=50301) 2024-12-05T13:44:46,701 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/7d862aed519e4d6a8287bc3d3af80527 2024-12-05T13:44:46,708 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7d862aed519e4d6a8287bc3d3af80527 2024-12-05T13:44:46,710 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/.tmp/c/7d862aed519e4d6a8287bc3d3af80527 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/c/7d862aed519e4d6a8287bc3d3af80527 2024-12-05T13:44:46,716 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7d862aed519e4d6a8287bc3d3af80527 2024-12-05T13:44:46,716 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/c/7d862aed519e4d6a8287bc3d3af80527, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-05T13:44:46,716 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 2cdce4a5bd15bc3efe602de253ae3080 in 35ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-05T13:44:46,717 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/0000000000000003002 
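The replay above applies 3001 edits up to maxSequenceIdInLog=3002 and then deletes the recovered-edits file 0000000000000003002; the file name is just the highest sequence id in the log, zero-padded to 19 digits (the same convention appears later as 0000000000000003000 and 0000000000000006000). A minimal Java sketch of that naming convention, assuming a %019d pad inferred from the names in this log rather than taken from HBase's own WALSplitUtil code:

    // Format/parse the zero-padded sequence-id file names seen in this log,
    // e.g. 3002 <-> "0000000000000003002" (19 digits).
    final class SeqIdFileName {
        static String format(long seqId) {
            return String.format("%019d", seqId);   // zero-pad to 19 digits
        }
        static long parse(String name) {
            return Long.parseLong(name);            // leading zeros are ignored
        }
        public static void main(String[] args) {
            System.out.println(format(3002L));                  // 0000000000000003002
            System.out.println(parse("0000000000000003000"));   // 3000
        }
    }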
2024-12-05T13:44:46,719 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,719 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,719 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-05T13:44:46,721 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2cdce4a5bd15bc3efe602de253ae3080 2024-12-05T13:44:46,724 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenIntoWAL/2cdce4a5bd15bc3efe602de253ae3080/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-05T13:44:46,726 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2cdce4a5bd15bc3efe602de253ae3080; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66898430, jitterRate=-0.0031357109546661377}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-05T13:44:46,726 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2cdce4a5bd15bc3efe602de253ae3080: Writing region info on filesystem at 1733406286064Initializing all the Stores at 1733406286065 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286065Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286065Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286065Cleaning up temporary data from old regions at 1733406286719 (+654 ms)Region opened successfully at 1733406286726 (+7 ms) 2024-12-05T13:44:46,799 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2cdce4a5bd15bc3efe602de253ae3080, disabling compactions & flushes 2024-12-05T13:44:46,799 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:46,799 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:46,799 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 
after waiting 0 ms 2024-12-05T13:44:46,800 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:46,802 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733406281607.2cdce4a5bd15bc3efe602de253ae3080. 2024-12-05T13:44:46,802 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2cdce4a5bd15bc3efe602de253ae3080: Waiting for close lock at 1733406286799Disabling compacts and flushes for region at 1733406286799Disabling writes for close at 1733406286799Writing region close event to WAL at 1733406286802 (+3 ms)Closed at 1733406286802 2024-12-05T13:44:46,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741857_1034 (size=93) 2024-12-05T13:44:46,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741857_1034 (size=93) 2024-12-05T13:44:46,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741857_1034 (size=93) 2024-12-05T13:44:46,810 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:46,810 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733406286043) 2024-12-05T13:44:46,828 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsWrittenIntoWAL Thread=405 (was 389) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2013718449_22 at /127.0.0.1:46650 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:41445 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:34105 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2013718449_22 at /127.0.0.1:32768 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:36363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2013718449_22 at /127.0.0.1:36062 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2013718449_22 at /127.0.0.1:36032 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:34105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=865 (was 785) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=377 (was 410), ProcessCount=11 (was 11), AvailableMemoryMB=8727 (was 8798) 2024-12-05T13:44:46,841 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#test2727 Thread=405, OpenFileDescriptor=865, MaxFileDescriptor=1048576, SystemLoadAverage=377, ProcessCount=11, AvailableMemoryMB=8726 2024-12-05T13:44:46,856 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:46,858 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:46,859 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:46,863 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-56443363, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-56443363, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:46,876 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-56443363/hregion-56443363.1733406286863, exclude list is [], retry=0 2024-12-05T13:44:46,880 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:46,880 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:46,882 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:46,885 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-56443363/hregion-56443363.1733406286863 2024-12-05T13:44:46,886 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:46,886 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 03198fd9f015828ee253aeb2c375c543, NAME => 'test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:46,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741865_1042 (size=43) 2024-12-05T13:44:46,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741865_1042 (size=43) 2024-12-05T13:44:46,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741865_1042 (size=43) 2024-12-05T13:44:46,898 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:46,900 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,901 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName a 2024-12-05T13:44:46,901 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,902 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,902 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,903 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName b 2024-12-05T13:44:46,903 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,904 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,904 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,906 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName c 2024-12-05T13:44:46,906 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:46,906 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:46,906 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,907 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,908 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,909 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,910 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,910 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:46,912 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:46,915 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:46,915 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 03198fd9f015828ee253aeb2c375c543; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60824377, jitterRate=-0.09364615380764008}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:46,916 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 03198fd9f015828ee253aeb2c375c543: Writing region info on filesystem at 1733406286898Initializing all the Stores at 1733406286899 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286899Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286899Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406286899Cleaning up temporary data from old regions at 1733406286910 (+11 ms)Region opened successfully at 1733406286916 (+6 ms) 2024-12-05T13:44:46,917 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 03198fd9f015828ee253aeb2c375c543, disabling compactions & flushes 2024-12-05T13:44:46,917 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:46,917 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:46,917 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. after waiting 0 ms 2024-12-05T13:44:46,917 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:46,917 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 
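The FlushLargeStoresPolicy entries above spell out the fallback arithmetic: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the per-family lower bound is the region's memstore flush heap size divided by the number of families, giving 16.0 M (flushSizeLowerBound=16777216) for the three-family testReplayEditsWrittenIntoWAL region and 42.7 M (flushSizeLowerBound=44739242) for test2727. A minimal sketch of that calculation, with flush sizes chosen only to reproduce the values logged here (they are not read from any HBase default):

    // Fallback per-family flush lower bound: memstore flush heap size / number of column families.
    final class FlushLowerBound {
        static long lowerBound(long memstoreFlushHeapSize, int numFamilies) {
            return memstoreFlushHeapSize / numFamilies;
        }
        public static void main(String[] args) {
            // 48 MiB across 3 families -> 16777216 (16.0 M), as logged for testReplayEditsWrittenIntoWAL
            System.out.println(lowerBound(48L * 1024 * 1024, 3));
            // 128 MiB across 3 families -> 44739242 (42.7 M), as logged for test2727
            System.out.println(lowerBound(128L * 1024 * 1024, 3));
        }
    }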
2024-12-05T13:44:46,917 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 03198fd9f015828ee253aeb2c375c543: Waiting for close lock at 1733406286917Disabling compacts and flushes for region at 1733406286917Disabling writes for close at 1733406286917Writing region close event to WAL at 1733406286917Closed at 1733406286917 2024-12-05T13:44:46,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741864_1041 (size=93) 2024-12-05T13:44:46,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741864_1041 (size=93) 2024-12-05T13:44:46,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741864_1041 (size=93) 2024-12-05T13:44:46,924 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:46,924 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-56443363:(num 1733406286863) 2024-12-05T13:44:46,924 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:46,928 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:46,945 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, exclude list is [], retry=0 2024-12-05T13:44:46,948 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:46,948 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:46,949 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:46,951 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 2024-12-05T13:44:46,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741866_1043 (size=200357) 2024-12-05T13:44:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741866_1043 (size=200357) 2024-12-05T13:44:47,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741866_1043 (size=200357) 
2024-12-05T13:44:47,147 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, size=195.7 K (200357bytes) 2024-12-05T13:44:47,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 2024-12-05T13:44:47,148 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 after 1ms 2024-12-05T13:44:47,151 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:47,153 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 took 6ms 2024-12-05T13:44:47,158 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733406286928.temp 2024-12-05T13:44:47,159 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp 2024-12-05T13:44:47,235 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 so closing down 2024-12-05T13:44:47,236 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:47,236 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:47,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741867_1044 (size=200357) 2024-12-05T13:44:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741867_1044 (size=200357) 2024-12-05T13:44:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741867_1044 (size=200357) 2024-12-05T13:44:47,244 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp (wrote 3000 edits, skipped 0 edits in 38 ms) 2024-12-05T13:44:47,246 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp to hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 2024-12-05T13:44:47,246 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 92 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, size=195.7 K, length=200357, corrupted=false, cancelled=false 2024-12-05T13:44:47,247 DEBUG 
[Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, journal: Splitting hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, size=195.7 K (200357bytes) at 1733406287147Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp at 1733406287159 (+12 ms)Split 1024 edits, skipped 0 edits. at 1733406287177 (+18 ms)Split 2048 edits, skipped 0 edits. at 1733406287197 (+20 ms)Finishing writing output for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 so closing down at 1733406287235 (+38 ms)3 split writer threads finished at 1733406287236 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp (wrote 3000 edits, skipped 0 edits in 38 ms) at 1733406287244 (+8 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000000001-wal.1733406286928.temp to hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 at 1733406287246 (+2 ms)Processed 3000 edits across 1 Regions in 92 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928, size=195.7 K, length=200357, corrupted=false, cancelled=false at 1733406287246 2024-12-05T13:44:47,249 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406286928 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406286928 2024-12-05T13:44:47,250 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 2024-12-05T13:44:47,251 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:47,254 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:47,268 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, exclude list is [], retry=0 2024-12-05T13:44:47,275 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:47,276 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:47,276 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:47,285 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 2024-12-05T13:44:47,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:47,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741868_1045 (size=200484) 2024-12-05T13:44:47,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741868_1045 (size=200484) 2024-12-05T13:44:47,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741868_1045 (size=200484) 2024-12-05T13:44:47,418 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, size=195.8 K (200484bytes) 2024-12-05T13:44:47,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 2024-12-05T13:44:47,419 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 after 1ms 2024-12-05T13:44:47,423 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:47,424 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 took 6ms 2024-12-05T13:44:47,429 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733406287255.temp 2024-12-05T13:44:47,431 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp 2024-12-05T13:44:47,461 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 so closing down 2024-12-05T13:44:47,461 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:47,461 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:47,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741869_1046 (size=200484) 2024-12-05T13:44:47,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741869_1046 (size=200484) 2024-12-05T13:44:47,466 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp (wrote 3000 edits, skipped 0 edits in 15 ms) 2024-12-05T13:44:47,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741869_1046 (size=200484) 2024-12-05T13:44:47,468 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp to hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 2024-12-05T13:44:47,468 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 43 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, size=195.8 K, length=200484, corrupted=false, cancelled=false 2024-12-05T13:44:47,468 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, journal: Splitting hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, size=195.8 K (200484bytes) at 1733406287418Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp at 1733406287431 (+13 ms)Split 1024 edits, skipped 0 edits. at 1733406287439 (+8 ms)Split 2048 edits, skipped 0 edits. at 1733406287450 (+11 ms)Finishing writing output for hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 so closing down at 1733406287461 (+11 ms)3 split writer threads finished at 1733406287461Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp (wrote 3000 edits, skipped 0 edits in 15 ms) at 1733406287466 (+5 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003001-wal.1733406287255.temp to hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 at 1733406287468 (+2 ms)Processed 3000 edits across 1 Regions in 43 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255, size=195.8 K, length=200484, corrupted=false, cancelled=false at 1733406287468 2024-12-05T13:44:47,471 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287255 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406287255 2024-12-05T13:44:47,472 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 2024-12-05T13:44:47,472 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:47,475 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/test2727-manual,16010,1733406286855, archiveDir=hdfs://localhost:34105/hbase/oldWALs, 
maxLogs=32 2024-12-05T13:44:47,488 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287475, exclude list is [], retry=0 2024-12-05T13:44:47,492 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:47,492 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:47,493 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:47,495 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733406286855/wal.1733406287475 2024-12-05T13:44:47,496 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:47,496 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 03198fd9f015828ee253aeb2c375c543, NAME => 'test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:47,496 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:47,496 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,496 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,498 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,500 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName a 2024-12-05T13:44:47,500 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:47,501 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:47,501 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,503 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName b 2024-12-05T13:44:47,503 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:47,503 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:47,504 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,505 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03198fd9f015828ee253aeb2c375c543 columnFamilyName c 2024-12-05T13:44:47,505 DEBUG [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
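The split recorded above wrote its output to a temp file named after the first sequence id it contains (0000000000000003001-wal.1733406287255.temp) and renamed it on close to the highest sequence id written (0000000000000006000). The sketch below is illustrative only, not HBase's own code: it simply reproduces that zero-padded, 19-digit naming from the two sequence ids visible in the log.

```java
// Illustrative sketch of the recovered-edits naming seen in the split journal above.
// Assumption: names are the sequence id zero-padded to 19 digits, with the temp file
// carrying the first id and the final file the highest id written.
public class RecoveredEditsNaming {

    // Zero-pad a sequence id to 19 digits, e.g. 3001 -> 0000000000000003001.
    static String formatSeqId(long seqId) {
        return String.format("%019d", seqId);
    }

    public static void main(String[] args) {
        long firstSeqId = 3001L;                // first edit written by this split output
        long maxSeqId = 6000L;                  // last edit written before the rename
        String walName = "wal.1733406287255";   // source WAL from the log above

        String tempName = formatSeqId(firstSeqId) + "-" + walName + ".temp";
        String finalName = formatSeqId(maxSeqId);

        System.out.println(tempName);   // 0000000000000003001-wal.1733406287255.temp
        System.out.println(finalName);  // 0000000000000006000
    }
}
```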
2024-12-05T13:44:47,506 INFO [StoreOpener-03198fd9f015828ee253aeb2c375c543-1 {}] regionserver.HStore(327): Store=03198fd9f015828ee253aeb2c375c543/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:47,506 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,507 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,509 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,510 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 2024-12-05T13:44:47,514 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:47,547 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 2024-12-05T13:44:47,548 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 2024-12-05T13:44:47,550 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:47,584 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 2024-12-05T13:44:47,585 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 03198fd9f015828ee253aeb2c375c543 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-05T13:44:47,604 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/a/91c5de7232e04ef4a3e0eb19a15e5697 is 41, key is test2727/a:100/1733406287288/Put/seqid=0 2024-12-05T13:44:47,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741871_1048 (size=84227) 2024-12-05T13:44:47,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741871_1048 (size=84227) 2024-12-05T13:44:47,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741871_1048 (size=84227) 2024-12-05T13:44:47,613 INFO 
[Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/a/91c5de7232e04ef4a3e0eb19a15e5697 2024-12-05T13:44:47,638 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/b/e73162261bbc481f9d9aabdb320010f6 is 41, key is test2727/b:100/1733406287326/Put/seqid=0 2024-12-05T13:44:47,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741872_1049 (size=84609) 2024-12-05T13:44:47,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741872_1049 (size=84609) 2024-12-05T13:44:47,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741872_1049 (size=84609) 2024-12-05T13:44:47,644 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/b/e73162261bbc481f9d9aabdb320010f6 2024-12-05T13:44:47,678 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/c/b574dc3ca6b946298f6535a2396ba2b3 is 41, key is test2727/c:100/1733406287368/Put/seqid=0 2024-12-05T13:44:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741873_1050 (size=84609) 2024-12-05T13:44:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741873_1050 (size=84609) 2024-12-05T13:44:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741873_1050 (size=84609) 2024-12-05T13:44:47,685 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/c/b574dc3ca6b946298f6535a2396ba2b3 2024-12-05T13:44:47,695 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/a/91c5de7232e04ef4a3e0eb19a15e5697 as hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/a/91c5de7232e04ef4a3e0eb19a15e5697 2024-12-05T13:44:47,704 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/a/91c5de7232e04ef4a3e0eb19a15e5697, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-05T13:44:47,705 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/b/e73162261bbc481f9d9aabdb320010f6 as hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/b/e73162261bbc481f9d9aabdb320010f6 2024-12-05T13:44:47,714 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/b/e73162261bbc481f9d9aabdb320010f6, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-05T13:44:47,715 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/.tmp/c/b574dc3ca6b946298f6535a2396ba2b3 as hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/c/b574dc3ca6b946298f6535a2396ba2b3 2024-12-05T13:44:47,724 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/c/b574dc3ca6b946298f6535a2396ba2b3, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-05T13:44:47,725 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 03198fd9f015828ee253aeb2c375c543 in 140ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-05T13:44:47,726 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000003000 2024-12-05T13:44:47,726 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/0000000000000006000 2024-12-05T13:44:47,728 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,728 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,729 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
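The flush figures above are internally consistent: each of the three families flushed 71.84 KB (73,560 bytes), which sums to the 215.51 KB / 220,680 bytes reported in the "Finished flush" line, and the 42.7 M per-family lower bound chosen by FlushLargeStoresPolicy corresponds to a 128 MB region memstore flush size divided across the three families (128 MB is an assumption here, but it is the value these numbers imply). A small worked check:

```java
// Worked check of the flush arithmetic in the log above.
// Assumption: the region memstore flush size is the common 128 MB default.
public class FlushMath {
    public static void main(String[] args) {
        long perFamilyBytes = 73_560L;   // ~71.84 KB flushed for each of a, b and c
        int families = 3;

        long totalBytes = perFamilyBytes * families;
        System.out.printf("flush dataSize = %d bytes (~%.2f KB)%n",
                totalBytes, totalBytes / 1024.0);              // 220680 bytes, ~215.51 KB

        long memstoreFlushSize = 128L * 1024 * 1024;           // assumed 128 MB
        long perFamilyLowerBound = memstoreFlushSize / families;
        System.out.printf("per-family lower bound = %d bytes (~%.1f MB)%n",
                perFamilyLowerBound,
                perFamilyLowerBound / (1024.0 * 1024.0));      // 44739242 bytes, ~42.7 MB
    }
}
```

The 44,739,242-byte result matches the flushSizeLowerBound reported when the region finishes opening below.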
2024-12-05T13:44:47,731 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 03198fd9f015828ee253aeb2c375c543 2024-12-05T13:44:47,735 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/test2727/03198fd9f015828ee253aeb2c375c543/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-05T13:44:47,736 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 03198fd9f015828ee253aeb2c375c543; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67584680, jitterRate=0.007090210914611816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:47,738 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 03198fd9f015828ee253aeb2c375c543: Writing region info on filesystem at 1733406287496Initializing all the Stores at 1733406287498 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406287498Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406287498Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406287498Obtaining lock to block concurrent updates at 1733406287585 (+87 ms)Preparing flush snapshotting stores in 03198fd9f015828ee253aeb2c375c543 at 1733406287585Finished memstore snapshotting test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733406287585Flushing stores of test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 
at 1733406287585Flushing 03198fd9f015828ee253aeb2c375c543/a: creating writer at 1733406287585Flushing 03198fd9f015828ee253aeb2c375c543/a: appending metadata at 1733406287603 (+18 ms)Flushing 03198fd9f015828ee253aeb2c375c543/a: closing flushed file at 1733406287603Flushing 03198fd9f015828ee253aeb2c375c543/b: creating writer at 1733406287619 (+16 ms)Flushing 03198fd9f015828ee253aeb2c375c543/b: appending metadata at 1733406287636 (+17 ms)Flushing 03198fd9f015828ee253aeb2c375c543/b: closing flushed file at 1733406287636Flushing 03198fd9f015828ee253aeb2c375c543/c: creating writer at 1733406287652 (+16 ms)Flushing 03198fd9f015828ee253aeb2c375c543/c: appending metadata at 1733406287677 (+25 ms)Flushing 03198fd9f015828ee253aeb2c375c543/c: closing flushed file at 1733406287677Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@182f7a6f: reopening flushed file at 1733406287693 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ded436: reopening flushed file at 1733406287704 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18ad3893: reopening flushed file at 1733406287714 (+10 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 03198fd9f015828ee253aeb2c375c543 in 140ms, sequenceid=6000, compaction requested=false; wal=null at 1733406287725 (+11 ms)Cleaning up temporary data from old regions at 1733406287728 (+3 ms)Region opened successfully at 1733406287737 (+9 ms) 2024-12-05T13:44:47,739 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-05T13:44:47,739 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 03198fd9f015828ee253aeb2c375c543, disabling compactions & flushes 2024-12-05T13:44:47,740 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:47,740 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:47,740 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. after waiting 0 ms 2024-12-05T13:44:47,740 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 2024-12-05T13:44:47,741 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733406286857.03198fd9f015828ee253aeb2c375c543. 
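The region-open lines above report ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67584680, jitterRate=0.007090210914611816}. Those two numbers are consistent with a 64 MB base split size adjusted by the jitter rate; the base itself is not printed, so treating it as 64 MB is an inference, checked below by working backwards from the logged values.

```java
// Back out the base split size implied by the jittered desiredMaxFileSize above.
// Assumed relation: desiredMaxFileSize = base * (1 + jitterRate)
//               =>  base = desiredMaxFileSize / (1 + jitterRate)
public class SplitSizeJitterCheck {
    public static void main(String[] args) {
        long desiredMaxFileSize = 67_584_680L;        // from the region-open line above
        double jitterRate = 0.007090210914611816;     // from the region-open line above

        double impliedBase = desiredMaxFileSize / (1 + jitterRate);
        System.out.printf("implied base = %.1f bytes (64 MB = %d bytes)%n",
                impliedBase, 64L * 1024 * 1024);      // ~67108864.0 vs 67108864

        // The later testSequentialEditLogSeqNum region shows the same relation with
        // desiredMaxFileSize=64944310 and jitterRate=-0.03225436806678772.
    }
}
```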
2024-12-05T13:44:47,741 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 03198fd9f015828ee253aeb2c375c543: Waiting for close lock at 1733406287739Disabling compacts and flushes for region at 1733406287739Disabling writes for close at 1733406287740 (+1 ms)Writing region close event to WAL at 1733406287741 (+1 ms)Closed at 1733406287741 2024-12-05T13:44:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741870_1047 (size=93) 2024-12-05T13:44:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741870_1047 (size=93) 2024-12-05T13:44:47,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741870_1047 (size=93) 2024-12-05T13:44:47,748 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:47,748 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733406287475) 2024-12-05T13:44:47,762 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#test2727 Thread=407 (was 405) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:36032 [Waiting for 
operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46754 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=927 (was 865) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=377 (was 377), ProcessCount=11 (was 11), AvailableMemoryMB=8571 (was 8726) 2024-12-05T13:44:47,773 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testSequentialEditLogSeqNum Thread=407, OpenFileDescriptor=927, MaxFileDescriptor=1048576, SystemLoadAverage=377, ProcessCount=11, AvailableMemoryMB=8570 2024-12-05T13:44:47,786 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:47,791 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:47,792 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733406287791 2024-12-05T13:44:47,800 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 2024-12-05T13:44:47,802 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:47,804 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 57fe0c136c1b5c62e6c920455631026c, NAME => 'testSequentialEditLogSeqNum,,1733406287786.57fe0c136c1b5c62e6c920455631026c.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:47,804 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733406287786.57fe0c136c1b5c62e6c920455631026c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:47,804 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,804 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,805 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c doesn't exist for region: 57fe0c136c1b5c62e6c920455631026c on table testSequentialEditLogSeqNum 2024-12-05T13:44:47,805 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 57fe0c136c1b5c62e6c920455631026c on table testSequentialEditLogSeqNum 2024-12-05T13:44:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741875_1052 (size=62) 2024-12-05T13:44:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741875_1052 (size=62) 2024-12-05T13:44:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741875_1052 (size=62) 2024-12-05T13:44:47,817 INFO [StoreOpener-57fe0c136c1b5c62e6c920455631026c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,818 INFO [StoreOpener-57fe0c136c1b5c62e6c920455631026c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 57fe0c136c1b5c62e6c920455631026c columnFamilyName a 2024-12-05T13:44:47,818 DEBUG [StoreOpener-57fe0c136c1b5c62e6c920455631026c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:47,819 INFO [StoreOpener-57fe0c136c1b5c62e6c920455631026c-1 {}] regionserver.HStore(327): Store=57fe0c136c1b5c62e6c920455631026c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:47,819 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,820 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,820 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,820 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,820 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,822 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 57fe0c136c1b5c62e6c920455631026c 2024-12-05T13:44:47,824 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:47,825 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 57fe0c136c1b5c62e6c920455631026c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64944310, jitterRate=-0.03225436806678772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:44:47,825 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 57fe0c136c1b5c62e6c920455631026c: Writing region info on filesystem at 1733406287804Initializing all the Stores at 1733406287816 (+12 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406287816Cleaning up temporary data from old regions at 1733406287820 (+4 ms)Region opened successfully at 1733406287825 (+5 ms) 2024-12-05T13:44:47,838 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 57fe0c136c1b5c62e6c920455631026c 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-05T13:44:47,856 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/.tmp/a/b95b02e6e61448bfa3da8c6d63bd06eb is 81, key is testSequentialEditLogSeqNum/a:x0/1733406287826/Put/seqid=0 2024-12-05T13:44:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741876_1053 (size=5833) 2024-12-05T13:44:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741876_1053 (size=5833) 2024-12-05T13:44:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741876_1053 (size=5833) 2024-12-05T13:44:47,863 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/.tmp/a/b95b02e6e61448bfa3da8c6d63bd06eb 2024-12-05T13:44:47,871 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/.tmp/a/b95b02e6e61448bfa3da8c6d63bd06eb as hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/a/b95b02e6e61448bfa3da8c6d63bd06eb 2024-12-05T13:44:47,878 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/a/b95b02e6e61448bfa3da8c6d63bd06eb, entries=10, sequenceid=13, filesize=5.7 K 2024-12-05T13:44:47,880 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 57fe0c136c1b5c62e6c920455631026c in 43ms, sequenceid=13, compaction requested=false 2024-12-05T13:44:47,880 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 57fe0c136c1b5c62e6c920455631026c: 2024-12-05T13:44:47,886 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T13:44:47,886 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T13:44:47,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T13:44:47,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T13:44:47,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T13:44:47,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741874_1051 (size=1616) 2024-12-05T13:44:47,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741874_1051 (size=1616) 2024-12-05T13:44:47,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741874_1051 (size=1616) 2024-12-05T13:44:47,903 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791, size=1.6 K (1616bytes) 2024-12-05T13:44:47,904 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 2024-12-05T13:44:47,904 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 after 0ms 2024-12-05T13:44:47,907 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:47,907 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 took 4ms 2024-12-05T13:44:47,909 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 so closing down 2024-12-05T13:44:47,909 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:47,910 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733406287791.temp 2024-12-05T13:44:47,912 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp 2024-12-05T13:44:47,912 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:47,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741877_1054 (size=1175) 2024-12-05T13:44:47,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741877_1054 (size=1175) 2024-12-05T13:44:47,923 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:47,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741877_1054 (size=1175) 2024-12-05T13:44:47,924 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp to hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000020 2024-12-05T13:44:47,925 INFO [Time-limited test {}] 
wal.WALSplitter(425): Processed 17 edits across 1 Regions in 18 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791, size=1.6 K, length=1616, corrupted=false, cancelled=false 2024-12-05T13:44:47,925 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791, journal: Splitting hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791, size=1.6 K (1616bytes) at 1733406287903Finishing writing output for hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791 so closing down at 1733406287909 (+6 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp at 1733406287912 (+3 ms)3 split writer threads finished at 1733406287912Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733406287923 (+11 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000003-wal.1733406287791.temp to hdfs://localhost:34105/hbase/data/default/testSequentialEditLogSeqNum/57fe0c136c1b5c62e6c920455631026c/recovered.edits/0000000000000000020 at 1733406287924 (+1 ms)Processed 17 edits across 1 Regions in 18 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733406287785/wal.1733406287791, size=1.6 K, length=1616, corrupted=false, cancelled=false at 1733406287925 (+1 ms) 2024-12-05T13:44:47,938 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testSequentialEditLogSeqNum Thread=412 (was 407) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:32768 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:36032 [Waiting for operation #20] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46754 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=963 (was 927) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=377 (was 377), ProcessCount=11 (was 11), AvailableMemoryMB=8568 (was 8570) 2024-12-05T13:44:47,949 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testRegionMadeOfBulkLoadedFilesOnly Thread=412, OpenFileDescriptor=963, MaxFileDescriptor=1048576, SystemLoadAverage=377, ProcessCount=11, AvailableMemoryMB=8567 2024-12-05T13:44:47,965 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:47,967 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:47,999 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:48,003 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-34475777, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-34475777, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:48,009 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T13:44:48,010 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:48,013 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T13:44:48,013 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T13:44:48,013 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T13:44:48,013 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:48,014 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-05T13:44:48,014 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:48,015 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-05T13:44:48,015 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:48,016 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When 
create output stream for /hbase/WALs/hregion-34475777/hregion-34475777.1733406288003, exclude list is [], retry=0 2024-12-05T13:44:48,020 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:48,020 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:48,021 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:48,024 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-34475777/hregion-34475777.1733406288003 2024-12-05T13:44:48,025 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:48,025 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 848af81e8be75ce76759447f7bfd4b25, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:48,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741879_1056 (size=70) 2024-12-05T13:44:48,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741879_1056 (size=70) 2024-12-05T13:44:48,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741879_1056 (size=70) 2024-12-05T13:44:48,037 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:48,039 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,040 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName a 2024-12-05T13:44:48,041 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,041 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,041 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,043 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName b 2024-12-05T13:44:48,043 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,044 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,044 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,046 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName c 2024-12-05T13:44:48,046 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,046 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,046 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,047 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,048 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,048 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,049 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,049 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-05T13:44:48,050 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,053 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:48,053 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 848af81e8be75ce76759447f7bfd4b25; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72542028, jitterRate=0.08096045255661011}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:48,054 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 848af81e8be75ce76759447f7bfd4b25: Writing region info on filesystem at 1733406288037Initializing all the Stores at 1733406288038 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288038Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288038Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288038Cleaning up temporary data from old regions at 1733406288049 (+11 ms)Region opened successfully at 1733406288054 (+5 ms) 2024-12-05T13:44:48,054 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 848af81e8be75ce76759447f7bfd4b25, disabling compactions & flushes 2024-12-05T13:44:48,054 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:48,055 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:48,055 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. after waiting 0 ms 2024-12-05T13:44:48,055 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:48,055 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 
2024-12-05T13:44:48,055 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 848af81e8be75ce76759447f7bfd4b25: Waiting for close lock at 1733406288054Disabling compacts and flushes for region at 1733406288054Disabling writes for close at 1733406288055 (+1 ms)Writing region close event to WAL at 1733406288055Closed at 1733406288055 2024-12-05T13:44:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741878_1055 (size=93) 2024-12-05T13:44:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741878_1055 (size=93) 2024-12-05T13:44:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741878_1055 (size=93) 2024-12-05T13:44:48,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:48,061 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-34475777:(num 1733406288003) 2024-12-05T13:44:48,061 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:48,063 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:48,075 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, exclude list is [], retry=0 2024-12-05T13:44:48,077 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:48,078 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:48,078 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:48,080 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 2024-12-05T13:44:48,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:48,082 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 848af81e8be75ce76759447f7bfd4b25, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:48,083 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated 
testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:48,083 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,083 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,085 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,086 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName a 2024-12-05T13:44:48,086 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,087 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,087 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,089 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName b 2024-12-05T13:44:48,089 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,089 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,089 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,091 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName c 2024-12-05T13:44:48,091 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:48,091 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:48,092 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,092 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,094 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,095 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,095 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,095 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-05T13:44:48,097 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:48,098 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 848af81e8be75ce76759447f7bfd4b25; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62022523, jitterRate=-0.07579238712787628}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:48,099 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 848af81e8be75ce76759447f7bfd4b25: Writing region info on filesystem at 1733406288083Initializing all the Stores at 1733406288084 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288084Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288085 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406288085Cleaning up temporary data from old regions at 1733406288095 (+10 ms)Region opened successfully at 1733406288099 (+4 ms) 2024-12-05T13:44:48,103 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733406288102/Put/seqid=0 2024-12-05T13:44:48,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741881_1058 (size=4826) 2024-12-05T13:44:48,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741881_1058 (size=4826) 2024-12-05T13:44:48,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741881_1058 (size=4826) 2024-12-05T13:44:48,114 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34105/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 848af81e8be75ce76759447f7bfd4b25/a 2024-12-05T13:44:48,122 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-05T13:44:48,122 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T13:44:48,123 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 848af81e8be75ce76759447f7bfd4b25: 2024-12-05T13:44:48,124 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as 
hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/0310f5d6192f46668d446a675fe664eb_SeqId_3_ 2024-12-05T13:44:48,125 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34105/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 848af81e8be75ce76759447f7bfd4b25/a as hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/0310f5d6192f46668d446a675fe664eb_SeqId_3_ - updating store file list. 2024-12-05T13:44:48,131 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 0310f5d6192f46668d446a675fe664eb_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-05T13:44:48,131 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/0310f5d6192f46668d446a675fe664eb_SeqId_3_ into 848af81e8be75ce76759447f7bfd4b25/a 2024-12-05T13:44:48,131 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34105/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 848af81e8be75ce76759447f7bfd4b25/a (new location: hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/0310f5d6192f46668d446a675fe664eb_SeqId_3_) 2024-12-05T13:44:48,175 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, size=0 (0bytes) 2024-12-05T13:44:48,175 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 might be still open, length is 0 2024-12-05T13:44:48,175 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 2024-12-05T13:44:48,176 WARN [IPC Server handler 0 on default port 34105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-12-05T13:44:48,176 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 after 1ms 2024-12-05T13:44:48,816 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:34848 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34848 dst: /127.0.0.1:44137 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44137 remote=/127.0.0.1:34848]. Total timeout mills is 60000, 59322 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:48,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:33158 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:43927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33158 dst: /127.0.0.1:43927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T13:44:48,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:52832 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:38521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52832 dst: /127.0.0.1:38521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:48,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741880_1059 (size=434) 2024-12-05T13:44:48,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741880_1059 (size=434) 2024-12-05T13:44:52,177 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 after 4002ms 2024-12-05T13:44:52,180 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:52,181 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 took 4006ms 2024-12-05T13:44:52,183 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064; continuing. 
2024-12-05T13:44:52,183 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 so closing down 2024-12-05T13:44:52,183 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:52,184 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733406288064.temp 2024-12-05T13:44:52,186 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp 2024-12-05T13:44:52,186 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:52,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741882_1060 (size=236) 2024-12-05T13:44:52,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741882_1060 (size=236) 2024-12-05T13:44:52,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741882_1060 (size=236) 2024-12-05T13:44:52,196 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:52,198 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp to hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 2024-12-05T13:44:52,198 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 17 ms; skipped=1; WAL=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, size=0, length=0, corrupted=false, cancelled=false 2024-12-05T13:44:52,198 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, journal: Splitting hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, size=0 (0bytes) at 1733406288175Finishing writing output for hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 so closing down at 1733406292183 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp at 1733406292186 (+3 ms)3 split writer threads finished at 1733406292186Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733406292196 (+10 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005-wal.1733406288064.temp to hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 at 1733406292198 (+2 ms)Processed 2 edits across 1 Regions in 17 ms; skipped=1; WAL=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064, size=0, length=0, corrupted=false, cancelled=false at 1733406292198 2024-12-05T13:44:52,200 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406288064 2024-12-05T13:44:52,201 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 2024-12-05T13:44:52,201 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:52,204 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:52,220 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406292204, exclude list is [], retry=0 2024-12-05T13:44:52,223 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:52,223 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:52,223 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:52,225 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406292204 2024-12-05T13:44:52,225 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:44:52,226 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 848af81e8be75ce76759447f7bfd4b25, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:52,226 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:52,226 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,226 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,228 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,229 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName a 2024-12-05T13:44:52,229 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,235 DEBUG [StoreFileOpener-848af81e8be75ce76759447f7bfd4b25-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 0310f5d6192f46668d446a675fe664eb_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-05T13:44:52,235 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/0310f5d6192f46668d446a675fe664eb_SeqId_3_ 2024-12-05T13:44:52,235 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:52,235 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,236 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName b 2024-12-05T13:44:52,237 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,237 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:52,237 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,238 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 848af81e8be75ce76759447f7bfd4b25 columnFamilyName c 2024-12-05T13:44:52,238 DEBUG [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,238 INFO [StoreOpener-848af81e8be75ce76759447f7bfd4b25-1 {}] regionserver.HStore(327): Store=848af81e8be75ce76759447f7bfd4b25/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:52,239 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,239 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,241 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,241 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 2024-12-05T13:44:52,243 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:52,244 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 2024-12-05T13:44:52,244 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 848af81e8be75ce76759447f7bfd4b25 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-05T13:44:52,257 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/.tmp/a/499e3168e53e430fa7d010a05e3cd8e3 is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733406288137/Put/seqid=0 2024-12-05T13:44:52,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741884_1062 (size=5149) 2024-12-05T13:44:52,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741884_1062 (size=5149) 2024-12-05T13:44:52,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741884_1062 (size=5149) 2024-12-05T13:44:52,263 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/.tmp/a/499e3168e53e430fa7d010a05e3cd8e3 2024-12-05T13:44:52,269 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/.tmp/a/499e3168e53e430fa7d010a05e3cd8e3 as hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/499e3168e53e430fa7d010a05e3cd8e3 2024-12-05T13:44:52,275 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/a/499e3168e53e430fa7d010a05e3cd8e3, entries=1, sequenceid=5, filesize=5.0 K 2024-12-05T13:44:52,275 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 848af81e8be75ce76759447f7bfd4b25 in 31ms, sequenceid=5, compaction requested=false; wal=null 2024-12-05T13:44:52,276 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/0000000000000000005 2024-12-05T13:44:52,277 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,277 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,278 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:52,280 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 848af81e8be75ce76759447f7bfd4b25 2024-12-05T13:44:52,282 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/848af81e8be75ce76759447f7bfd4b25/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-05T13:44:52,284 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 848af81e8be75ce76759447f7bfd4b25; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59364733, jitterRate=-0.11539654433727264}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:52,284 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 848af81e8be75ce76759447f7bfd4b25: Writing region info on filesystem at 1733406292226Initializing all the Stores at 1733406292227 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406292227Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406292227Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406292227Obtaining lock to block concurrent updates at 1733406292244 (+17 ms)Preparing flush snapshotting stores in 848af81e8be75ce76759447f7bfd4b25 at 1733406292244Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733406292244Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 
at 1733406292244Flushing 848af81e8be75ce76759447f7bfd4b25/a: creating writer at 1733406292244Flushing 848af81e8be75ce76759447f7bfd4b25/a: appending metadata at 1733406292256 (+12 ms)Flushing 848af81e8be75ce76759447f7bfd4b25/a: closing flushed file at 1733406292256Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@322bd38b: reopening flushed file at 1733406292268 (+12 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 848af81e8be75ce76759447f7bfd4b25 in 31ms, sequenceid=5, compaction requested=false; wal=null at 1733406292275 (+7 ms)Cleaning up temporary data from old regions at 1733406292277 (+2 ms)Region opened successfully at 1733406292284 (+7 ms) 2024-12-05T13:44:52,288 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 848af81e8be75ce76759447f7bfd4b25, disabling compactions & flushes 2024-12-05T13:44:52,288 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:52,288 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:52,288 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. after waiting 0 ms 2024-12-05T13:44:52,288 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 2024-12-05T13:44:52,289 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733406287966.848af81e8be75ce76759447f7bfd4b25. 
2024-12-05T13:44:52,289 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 848af81e8be75ce76759447f7bfd4b25: Waiting for close lock at 1733406292288Disabling compacts and flushes for region at 1733406292288Disabling writes for close at 1733406292288Writing region close event to WAL at 1733406292289 (+1 ms)Closed at 1733406292289 2024-12-05T13:44:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741883_1061 (size=93) 2024-12-05T13:44:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741883_1061 (size=93) 2024-12-05T13:44:52,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741883_1061 (size=93) 2024-12-05T13:44:52,295 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:52,295 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733406292204) 2024-12-05T13:44:52,306 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testRegionMadeOfBulkLoadedFilesOnly Thread=415 (was 412) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1870628089_22 at /127.0.0.1:34876 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1870628089_22 at /127.0.0.1:52872 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:34105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:34105 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=1022 (was 963) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=347 (was 377), ProcessCount=11 (was 11), AvailableMemoryMB=8556 (was 8567) 2024-12-05T13:44:52,317 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterRegionMovedWithMultiCF Thread=415, OpenFileDescriptor=1022, MaxFileDescriptor=1048576, SystemLoadAverage=347, ProcessCount=11, AvailableMemoryMB=8556 2024-12-05T13:44:52,330 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:52,335 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T13:44:52,338 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is da6aa2204f50,39625,1733406277651 2024-12-05T13:44:52,340 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4f9f3280 2024-12-05T13:44:52,341 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T13:44:52,343 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T13:44:52,348 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T13:44:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-05T13:44:52,357 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T13:44:52,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-05T13:44:52,360 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,362 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T13:44:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T13:44:52,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741885_1063 (size=694) 2024-12-05T13:44:52,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741885_1063 (size=694) 2024-12-05T13:44:52,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741885_1063 (size=694) 2024-12-05T13:44:52,376 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13 2024-12-05T13:44:52,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741886_1064 (size=77) 2024-12-05T13:44:52,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741886_1064 (size=77) 2024-12-05T13:44:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741886_1064 (size=77) 2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:52,389 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,389 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,389 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406292389Disabling compacts and flushes for region at 1733406292389Disabling writes for close at 1733406292389Writing region close event to WAL at 1733406292389Closed at 1733406292389 2024-12-05T13:44:52,391 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T13:44:52,397 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733406292392"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733406292392"}]},"ts":"1733406292392"} 2024-12-05T13:44:52,402 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T13:44:52,404 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T13:44:52,407 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733406292404"}]},"ts":"1733406292404"} 2024-12-05T13:44:52,412 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-05T13:44:52,412 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {da6aa2204f50=0} racks are {/default-rack=0} 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T13:44:52,414 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T13:44:52,414 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T13:44:52,414 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T13:44:52,414 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T13:44:52,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN}] 2024-12-05T13:44:52,419 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN 2024-12-05T13:44:52,420 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN; state=OFFLINE, location=da6aa2204f50,34361,1733406278491; forceNewPlan=false, retain=false 2024-12-05T13:44:52,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T13:44:52,573 INFO [da6aa2204f50:39625 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T13:44:52,574 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPENING, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:52,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN because future has completed 2024-12-05T13:44:52,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491}] 2024-12-05T13:44:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T13:44:52,737 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,737 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:52,738 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,738 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:52,738 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,738 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,740 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,742 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:52,742 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,743 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:52,743 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,745 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:52,745 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:52,746 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:52,746 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,747 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,748 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,748 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping 
wal replay for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,748 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,749 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-05T13:44:52,751 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,754 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:52,755 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67830720, jitterRate=0.010756492614746094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-05T13:44:52,755 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:52,756 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Running coprocessor pre-open hook at 1733406292739Writing region info on filesystem at 1733406292739Initializing all the Stores at 1733406292740 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406292740Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406292740Cleaning up temporary data from old regions at 1733406292748 (+8 ms)Running coprocessor post-open hooks at 1733406292755 (+7 ms)Region opened successfully at 1733406292756 (+1 ms) 2024-12-05T13:44:52,757 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., pid=6, masterSystemTime=1733406292732 2024-12-05T13:44:52,761 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post 
open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,761 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:52,763 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPEN, openSeqNum=2, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:52,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 because future has completed 2024-12-05T13:44:52,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T13:44:52,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 in 190 msec 2024-12-05T13:44:52,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T13:44:52,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN in 356 msec 2024-12-05T13:44:52,779 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T13:44:52,779 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733406292779"}]},"ts":"1733406292779"} 2024-12-05T13:44:52,782 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-05T13:44:52,784 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T13:44:52,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 434 msec 2024-12-05T13:44:52,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T13:44:52,999 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. 
Timeout = 60000ms 2024-12-05T13:44:52,999 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-05T13:44:53,000 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T13:44:53,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-05T13:44:53,007 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T13:44:53,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-05T13:44:53,020 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=2] 2024-12-05T13:44:53,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, source=da6aa2204f50,34361,1733406278491, destination=da6aa2204f50,43377,1733406278401, warming up region on da6aa2204f50,43377,1733406278401 2024-12-05T13:44:53,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:44:53,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, source=da6aa2204f50,34361,1733406278491, destination=da6aa2204f50,43377,1733406278401, running balancer 2024-12-05T13:44:53,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE 2024-12-05T13:44:53,041 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE 2024-12-05T13:44:53,044 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=CLOSING, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:53,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:44:53,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE because future has completed 2024-12-05T13:44:53,055 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: 
false: evictOnSplit: true: evictOnClose: false 2024-12-05T13:44:53,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(7855): Warmup {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:53,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491}] 2024-12-05T13:44:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:53,058 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,060 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:53,060 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:53,061 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:53,061 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,062 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:53,062 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:53,063 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:53,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:53,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:53,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406293063Disabling compacts and flushes for region at 1733406293063Disabling writes for close at 1733406293064 (+1 ms)Writing region close event to WAL at 1733406293064Closed at 1733406293064 2024-12-05T13:44:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-05T13:44:53,214 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,214 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T13:44:53,215 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:53,215 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,215 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,215 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:53,215 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:53,215 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-05T13:44:53,230 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/131fd3436a4d430aa5a322cdf4e60458 is 35, key is r1/cf1:q/1733406293022/Put/seqid=0 2024-12-05T13:44:53,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741887_1065 (size=4783) 2024-12-05T13:44:53,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741887_1065 (size=4783) 2024-12-05T13:44:53,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741887_1065 (size=4783) 2024-12-05T13:44:53,237 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/131fd3436a4d430aa5a322cdf4e60458 2024-12-05T13:44:53,245 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/131fd3436a4d430aa5a322cdf4e60458 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458 2024-12-05T13:44:53,253 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T13:44:53,254 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff in 39ms, sequenceid=5, compaction requested=false 2024-12-05T13:44:53,255 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-05T13:44:53,260 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-05T13:44:53,263 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,263 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406293215Running coprocessor pre-close hooks at 1733406293215Disabling compacts and flushes for region at 1733406293215Disabling writes for close at 1733406293215Obtaining lock to block concurrent updates at 1733406293215Preparing flush snapshotting stores in 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff at 1733406293215Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733406293215Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. at 1733406293216 (+1 ms)Flushing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1: creating writer at 1733406293216Flushing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1: appending metadata at 1733406293230 (+14 ms)Flushing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1: closing flushed file at 1733406293230Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52d0b51d: reopening flushed file at 1733406293244 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff in 39ms, sequenceid=5, compaction requested=false at 1733406293255 (+11 ms)Writing region close event to WAL at 1733406293256 (+1 ms)Running coprocessor post-close hooks at 1733406293261 (+5 ms)Closed at 1733406293263 (+2 ms) 2024-12-05T13:44:53,264 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff move to da6aa2204f50,43377,1733406278401 record at close sequenceid=5 2024-12-05T13:44:53,267 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,268 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=CLOSED 2024-12-05T13:44:53,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 because future has completed 2024-12-05T13:44:53,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T13:44:53,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 in 217 msec 2024-12-05T13:44:53,277 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE; state=CLOSED, 
location=da6aa2204f50,43377,1733406278401; forceNewPlan=false, retain=false 2024-12-05T13:44:53,428 INFO [da6aa2204f50:39625 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T13:44:53,428 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPENING, regionLocation=da6aa2204f50,43377,1733406278401 2024-12-05T13:44:53,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE because future has completed 2024-12-05T13:44:53,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401}] 2024-12-05T13:44:53,589 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,589 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:53,589 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,589 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:53,589 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,589 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,591 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,592 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:53,592 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:53,599 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458 2024-12-05T13:44:53,599 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:53,599 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,600 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:53,600 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:53,601 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:53,601 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,602 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,603 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,603 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,603 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,604 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-05T13:44:53,606 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,606 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75407046, jitterRate=0.12365254759788513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-05T13:44:53,606 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,607 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Running coprocessor pre-open hook at 1733406293590Writing region info on filesystem at 1733406293590Initializing all the Stores at 1733406293591 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406293591Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406293591Cleaning up temporary data from old regions at 1733406293604 (+13 ms)Running coprocessor post-open hooks at 1733406293607 (+3 ms)Region opened successfully at 1733406293607 2024-12-05T13:44:53,608 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., pid=9, masterSystemTime=1733406293584 2024-12-05T13:44:53,611 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,611 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,612 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPEN, openSeqNum=9, regionLocation=da6aa2204f50,43377,1733406278401 2024-12-05T13:44:53,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401 because future has completed 2024-12-05T13:44:53,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-05T13:44:53,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401 in 184 msec 2024-12-05T13:44:53,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE in 579 msec 2024-12-05T13:44:53,647 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:44:53,649 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:44:53,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:55146 deadline: 1733406353653, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=43377 startCode=1733406278401. As of locationSeqNum=5. 2024-12-05T13:44:53,680 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=43377 startCode=1733406278401. As of locationSeqNum=5. 2024-12-05T13:44:53,680 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=43377 startCode=1733406278401. As of locationSeqNum=5. 
2024-12-05T13:44:53,681 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,43377,1733406278401, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=43377 startCode=1733406278401. As of locationSeqNum=5. 2024-12-05T13:44:53,787 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:44:53,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:44:53,799 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-05T13:44:53,822 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d is 29, key is r1/cf1:/1733406293791/DeleteFamily/seqid=0 2024-12-05T13:44:53,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741888_1066 (size=4906) 2024-12-05T13:44:53,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741888_1066 (size=4906) 2024-12-05T13:44:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741888_1066 (size=4906) 2024-12-05T13:44:53,833 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:53,839 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:53,854 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf2/75ef22386b044109904d540ee79c1f41 is 29, key is r1/cf2:/1733406293791/DeleteFamily/seqid=0 2024-12-05T13:44:53,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741889_1067 (size=4906) 2024-12-05T13:44:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741889_1067 (size=4906) 2024-12-05T13:44:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741889_1067 (size=4906) 
2024-12-05T13:44:53,861 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf2/75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:53,867 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:53,868 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:53,874 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:53,874 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d, entries=1, sequenceid=12, filesize=4.8 K 2024-12-05T13:44:53,876 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf2/75ef22386b044109904d540ee79c1f41 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:53,882 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:53,882 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41, entries=1, sequenceid=12, filesize=4.8 K 2024-12-05T13:44:53,884 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff in 85ms, sequenceid=12, compaction requested=false 2024-12-05T13:44:53,884 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: 2024-12-05T13:44:53,886 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-05T13:44:53,888 DEBUG [Time-limited test {}] regionserver.HStore(1541): 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1 is initiating major compaction (all files) 2024-12-05T13:44:53,888 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:44:53,888 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:53,888 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,889 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458, hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d] into tmpdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp, totalSize=9.5 K 2024-12-05T13:44:53,890 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 131fd3436a4d430aa5a322cdf4e60458, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733406293022 2024-12-05T13:44:53,891 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a19c2fd86c2f4d7ebb7acc467aa4358d, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-05T13:44:53,902 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff#cf1#compaction#16 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T13:44:53,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741890_1068 (size=4626) 2024-12-05T13:44:53,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741890_1068 (size=4626) 2024-12-05T13:44:53,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741890_1068 (size=4626) 2024-12-05T13:44:53,921 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf1/327c8da8c58047c6850e145bd74b347e as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/327c8da8c58047c6850e145bd74b347e 2024-12-05T13:44:53,937 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1 of 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff into 327c8da8c58047c6850e145bd74b347e(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-05T13:44:53,937 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: 2024-12-05T13:44:53,937 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-05T13:44:53,937 DEBUG [Time-limited test {}] regionserver.HStore(1541): 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2 is initiating major compaction (all files) 2024-12-05T13:44:53,937 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:44:53,938 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:44:53,938 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,938 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41] into tmpdir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp, totalSize=4.8 K 2024-12-05T13:44:53,939 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 75ef22386b044109904d540ee79c1f41, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-05T13:44:53,945 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-05T13:44:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741891_1069 (size=4592) 2024-12-05T13:44:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741891_1069 (size=4592) 2024-12-05T13:44:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741891_1069 (size=4592) 2024-12-05T13:44:53,961 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/.tmp/cf2/7172fca40fd54de985aee82179f2bce7 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/7172fca40fd54de985aee82179f2bce7 2024-12-05T13:44:53,970 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2 of 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff into 7172fca40fd54de985aee82179f2bce7(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-05T13:44:53,970 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: 2024-12-05T13:44:53,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, source=da6aa2204f50,43377,1733406278401, destination=da6aa2204f50,34361,1733406278491, warming up region on da6aa2204f50,34361,1733406278491 2024-12-05T13:44:53,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, source=da6aa2204f50,43377,1733406278401, destination=da6aa2204f50,34361,1733406278491, running balancer 2024-12-05T13:44:53,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE 2024-12-05T13:44:53,977 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE 2024-12-05T13:44:53,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:53,979 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=CLOSING, regionLocation=da6aa2204f50,43377,1733406278401 2024-12-05T13:44:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(7855): Warmup {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:53,980 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:53,981 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:53,981 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:53,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE because future has completed 2024-12-05T13:44:53,983 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T13:44:53,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401}] 2024-12-05T13:44:53,989 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458 2024-12-05T13:44:53,996 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/327c8da8c58047c6850e145bd74b347e 2024-12-05T13:44:54,004 INFO [StoreFileOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:54,004 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:54,004 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:54,004 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,006 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:54,006 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:54,017 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/7172fca40fd54de985aee82179f2bce7 2024-12-05T13:44:54,026 INFO [StoreFileOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:54,027 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-05T13:44:54,027 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:54,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34361 {}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406294027Disabling compacts and flushes for region at 1733406294027Disabling writes for close at 1733406294027Writing region close event to WAL at 1733406294035 (+8 ms)Closed at 1733406294035 2024-12-05T13:44:54,138 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,138 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T13:44:54,138 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:54,138 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,138 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,138 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:54,138 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,138 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458, hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d] to archive 2024-12-05T13:44:54,142 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-05T13:44:54,145 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458 to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/131fd3436a4d430aa5a322cdf4e60458 2024-12-05T13:44:54,147 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/a19c2fd86c2f4d7ebb7acc467aa4358d 2024-12-05T13:44:54,158 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41] to archive 2024-12-05T13:44:54,159 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-05T13:44:54,161 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41 to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/75ef22386b044109904d540ee79c1f41 2024-12-05T13:44:54,166 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-05T13:44:54,166 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,166 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406294138Running coprocessor pre-close hooks at 1733406294138Disabling compacts and flushes for region at 1733406294138Disabling writes for close at 1733406294138Writing region close event to WAL at 1733406294162 (+24 ms)Running coprocessor post-close hooks at 1733406294166 (+4 ms)Closed at 1733406294166 2024-12-05T13:44:54,167 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff move to da6aa2204f50,34361,1733406278491 record at close sequenceid=12 2024-12-05T13:44:54,169 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,171 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=CLOSED 2024-12-05T13:44:54,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401 because future has completed 2024-12-05T13:44:54,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-05T13:44:54,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,43377,1733406278401 in 197 msec 2024-12-05T13:44:54,184 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE; state=CLOSED, location=da6aa2204f50,34361,1733406278491; forceNewPlan=false, retain=false 2024-12-05T13:44:54,334 INFO [da6aa2204f50:39625 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T13:44:54,335 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPENING, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:54,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE because future has completed 2024-12-05T13:44:54,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491}] 2024-12-05T13:44:54,500 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,501 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:54,502 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,502 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:54,502 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,502 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,505 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,507 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:54,507 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:54,515 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/327c8da8c58047c6850e145bd74b347e 2024-12-05T13:44:54,516 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:54,516 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,517 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:54,517 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:54,526 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/7172fca40fd54de985aee82179f2bce7 2024-12-05T13:44:54,526 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:54,526 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,527 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,528 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,529 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,529 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,530 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
2024-12-05T13:44:54,531 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,532 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73147412, jitterRate=0.08998137712478638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-05T13:44:54,533 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,533 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Running coprocessor pre-open hook at 1733406294503Writing region info on filesystem at 1733406294503Initializing all the Stores at 1733406294505 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406294505Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406294505Cleaning up temporary data from old regions at 1733406294529 (+24 ms)Running coprocessor post-open hooks at 1733406294533 (+4 ms)Region opened successfully at 1733406294533 2024-12-05T13:44:54,535 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., pid=12, masterSystemTime=1733406294491 2024-12-05T13:44:54,537 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,537 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,538 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPEN, openSeqNum=18, regionLocation=da6aa2204f50,34361,1733406278491 2024-12-05T13:44:54,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 because future has completed 2024-12-05T13:44:54,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-05T13:44:54,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,34361,1733406278491 in 205 msec 2024-12-05T13:44:54,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, REOPEN/MOVE in 571 msec 2024-12-05T13:44:54,579 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:44:54,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48924, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:44:54,582 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server da6aa2204f50,34361,1733406278491: testing ***** 2024-12-05T13:44:54,582 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-05T13:44:54,584 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-05T13:44:54,585 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-05T13:44:54,587 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-05T13:44:54,588 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-05T13:44:54,597 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 498091592 }, "NonHeapMemoryUsage": { "committed": 170328064, "init": 7667712, "max": -1, "used": 167745424 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "da6aa2204f50", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, 
"numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2068, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 1, "ProcessCallTime_max": 13, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 4, "ProcessCallTime_median": 7, "ProcessCallTime_75th_percentile": 10, "ProcessCallTime_90th_percentile": 11, "ProcessCallTime_95th_percentile": 12, "ProcessCallTime_98th_percentile": 12, "ProcessCallTime_99th_percentile": 12, "ProcessCallTime_99.9th_percentile": 12, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 13, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 4, "TotalCallTime_median": 7, "TotalCallTime_75th_percentile": 10, "TotalCallTime_90th_percentile": 11, "TotalCallTime_95th_percentile": 12, "TotalCallTime_98th_percentile": 12, "TotalCallTime_99th_percentile": 12, "TotalCallTime_99.9th_percentile": 12, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 175, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 131, "ResponseSize_90th_percentile": 157, "ResponseSize_95th_percentile": 166, "ResponseSize_98th_percentile": 171, "ResponseSize_99th_percentile": 173, "ResponseSize_99.9th_percentile": 174, "ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, 
"RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 352 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "da6aa2204f50", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:53425", "tag.serverName": "da6aa2204f50,34361,1733406278491", "tag.clusterId": "20de6e0f-2932-48fa-9e1b-78c45c9fed57", "tag.Context": "regionserver", "tag.Hostname": "da6aa2204f50", "regionCount": 1, "storeCount": 4, "hlogFileCount": 2, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 74, "memStoreHeapSize": 1248, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733406278491, "averageRegionSize": 74, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, 
"l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 1.4, "writeRequestRatePerSecond": 0.4, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 199680, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 1, "activeScanners": 0, "totalRequestCount": 7, "totalRowActionRequestCount": 9, "readRequestCount": 7, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 2, "rpcGetRequestCount": 1, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 4, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 2, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 4, "ScanTime_min": 0, "ScanTime_max": 2, "ScanTime_mean": 0, 
"ScanTime_25th_percentile": 0, "ScanTime_median": 1, "ScanTime_75th_percentile": 1, "ScanTime_90th_percentile": 1, "ScanTime_95th_percentile": 1, "ScanTime_98th_percentile": 1, "ScanTime_99th_percentile": 1, "ScanTime_99.9th_percentile": 1, "ScanTime_TimeRangeCount_0-1": 4, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 2, "Put_min": 2, "Put_max": 10, "Put_mean": 6, "Put_25th_percentile": 4, "Put_median": 6, "Put_75th_percentile": 8, "Put_90th_percentile": 9, "Put_95th_percentile": 9, "Put_98th_percentile": 9, "Put_99th_percentile": 9, "Put_99.9th_percentile": 9, "Put_TimeRangeCount_0-1": 2, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, 
"Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, 
"Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 1, "Get_min": 1, "Get_max": 1, "Get_mean": 1, "Get_25th_percentile": 1, "Get_median": 1, "Get_75th_percentile": 1, "Get_90th_percentile": 1, "Get_95th_percentile": 1, "Get_98th_percentile": 1, "Get_99th_percentile": 1, "Get_99.9th_percentile": 1, "Get_TimeRangeCount_0-1": 1, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, 
"MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 4, "ScanSize_min": 0, "ScanSize_max": 144, "ScanSize_mean": 72, "ScanSize_25th_percentile": 36, "ScanSize_median": 72, "ScanSize_75th_percentile": 108, "ScanSize_90th_percentile": 129, "ScanSize_95th_percentile": 136, "ScanSize_98th_percentile": 141, "ScanSize_99th_percentile": 142, "ScanSize_99.9th_percentile": 143, "ScanSize_SizeRangeCount_0-10": 4, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, 
"Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-05T13:44:54,601 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39625 {}] master.MasterRpcServices(700): da6aa2204f50,34361,1733406278491 reported a fatal error: ***** ABORTING region server da6aa2204f50,34361,1733406278491: testing ***** 2024-12-05T13:44:54,604 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'da6aa2204f50,34361,1733406278491' ***** 2024-12-05T13:44:54,604 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-05T13:44:54,604 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T13:44:54,604 INFO [RS:1;da6aa2204f50:34361 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-05T13:44:54,604 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T13:44:54,605 INFO [RS:1;da6aa2204f50:34361 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-05T13:44:54,605 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(3091): Received CLOSE for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,605 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(956): aborting server da6aa2204f50,34361,1733406278491 2024-12-05T13:44:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43377 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:38352 deadline: 1733406354605, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=34361 startCode=1733406278491. As of locationSeqNum=12. 2024-12-05T13:44:54,605 INFO [RS:1;da6aa2204f50:34361 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:44:54,605 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:44:54,605 INFO [RS:1;da6aa2204f50:34361 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;da6aa2204f50:34361. 2024-12-05T13:44:54,605 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,606 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,606 DEBUG [RS:1;da6aa2204f50:34361 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:44:54,606 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:44:54,606 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,606 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,43377,1733406278401, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,43377,1733406278401, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=34361 startCode=1733406278491. As of locationSeqNum=12. 2024-12-05T13:44:54,606 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,43377,1733406278401, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=34361 startCode=1733406278491. As of locationSeqNum=12. 
2024-12-05T13:44:54,606 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,43377,1733406278401, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=da6aa2204f50 port=34361 startCode=1733406278491. As of locationSeqNum=12. 2024-12-05T13:44:54,606 DEBUG [RS:1;da6aa2204f50:34361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:44:54,607 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T13:44:54,607 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T13:44:54,607 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T13:44:54,607 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T13:44:54,607 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T13:44:54,607 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T13:44:54,607 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.} 2024-12-05T13:44:54,607 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T13:44:54,607 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T13:44:54,608 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T13:44:54,608 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T13:44:54,608 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:44:54,608 DEBUG [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:54,608 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406294605Running coprocessor pre-close hooks at 1733406294605Disabling compacts and flushes for region at 1733406294605Disabling writes for close at 1733406294606 (+1 ms)Writing region close event to WAL at 1733406294607 (+1 ms)Running coprocessor post-close hooks at 1733406294607Closed at 1733406294608 (+1 ms) 2024-12-05T13:44:54,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:54,608 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:54,608 ERROR [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1960): Memstore data size is 5811 in region hbase:meta,,1.1588230740 2024-12-05T13:44:54,608 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T13:44:54,609 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T13:44:54,609 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733406294607Running coprocessor pre-close hooks at 1733406294607Disabling compacts and flushes for region at 1733406294607Disabling writes for close at 1733406294608 (+1 ms)Writing region close event to WAL at 1733406294608Running coprocessor post-close hooks at 1733406294608Closed at 1733406294608 2024-12-05T13:44:54,609 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T13:44:54,609 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting 2024-12-05T13:44:54,609 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting 
2024-12-05T13:44:54,609 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 from cache 2024-12-05T13:44:54,612 INFO [regionserver/da6aa2204f50:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:44:54,717 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:54,719 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1] 2024-12-05T13:44:54,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:54,720 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting 2024-12-05T13:44:54,720 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server da6aa2204f50,34361,1733406278491 aborting 2024-12-05T13:44:54,720 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 from cache 2024-12-05T13:44:54,799 INFO [regionserver/da6aa2204f50:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T13:44:54,800 INFO [regionserver/da6aa2204f50:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T13:44:54,808 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(976): stopping server da6aa2204f50,34361,1733406278491; all regions closed. 
2024-12-05T13:44:54,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741836_1012 (size=2747) 2024-12-05T13:44:54,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741836_1012 (size=2747) 2024-12-05T13:44:54,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741836_1012 (size=2747) 2024-12-05T13:44:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741833_1009 (size=1677) 2024-12-05T13:44:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741833_1009 (size=1677) 2024-12-05T13:44:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741833_1009 (size=1677) 2024-12-05T13:44:54,818 DEBUG [RS:1;da6aa2204f50:34361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:44:54,818 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:44:54,818 INFO [RS:1;da6aa2204f50:34361 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:44:54,818 INFO [RS:1;da6aa2204f50:34361 {}] hbase.ChoreService(370): Chore service for: regionserver/da6aa2204f50:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T13:44:54,819 INFO [RS:1;da6aa2204f50:34361 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:44:54,819 INFO [regionserver/da6aa2204f50:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T13:44:54,819 INFO [RS:1;da6aa2204f50:34361 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34361 2024-12-05T13:44:54,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/da6aa2204f50,34361,1733406278491 2024-12-05T13:44:54,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:44:54,846 INFO [RS:1;da6aa2204f50:34361 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:44:54,888 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [da6aa2204f50,34361,1733406278491] 2024-12-05T13:44:54,928 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:54,930 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1] 2024-12-05T13:44:54,931 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/da6aa2204f50,34361,1733406278491 already deleted, retry=false 2024-12-05T13:44:54,931 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of da6aa2204f50,34361,1733406278491 on da6aa2204f50,39625,1733406277651 2024-12-05T13:44:54,932 WARN [RPCClient-NioEventLoopGroup-6-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server da6aa2204f50:34361 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: da6aa2204f50/172.17.0.2:34361 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:54,933 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1, error=java.net.ConnectException: Call to address=da6aa2204f50:34361 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: da6aa2204f50/172.17.0.2:34361 2024-12-05T13:44:54,933 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 is java.net.ConnectException: Connection refused 2024-12-05T13:44:54,933 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 from cache 2024-12-05T13:44:54,933 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.FailedServers(52): Added failed server with address da6aa2204f50:34361 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: da6aa2204f50/172.17.0.2:34361 2024-12-05T13:44:54,936 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure da6aa2204f50,34361,1733406278491, splitWal=true, meta=true 2024-12-05T13:44:54,937 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for da6aa2204f50,34361,1733406278491 (carryingMeta=true) da6aa2204f50,34361,1733406278491/CRASHED/regionCount=2/lock=java.util.concurrent.locks.ReentrantReadWriteLock@461a1306[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-12-05T13:44:54,938 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure da6aa2204f50,34361,1733406278491, splitWal=true, meta=true 2024-12-05T13:44:54,939 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure da6aa2204f50,34361,1733406278491, splitWal=true, meta=true, isMeta: true 2024-12-05T13:44:54,940 DEBUG [PEWorker-5 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting 2024-12-05T13:44:54,941 INFO [PEWorker-5 {}] master.SplitWALManager(105): da6aa2204f50,34361,1733406278491 WAL count=1, meta=true 2024-12-05T13:44:54,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta}] 2024-12-05T13:44:54,948 DEBUG [PEWorker-4 {}] master.SplitWALManager(158): Acquired split WAL worker=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:54,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, worker=da6aa2204f50,42407,1733406278528}] 2024-12-05T13:44:54,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:44:54,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34361-0x101a7065f890002, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:44:54,989 INFO [RS:1;da6aa2204f50:34361 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:44:54,989 INFO [RS:1;da6aa2204f50:34361 {}] regionserver.HRegionServer(1031): Exiting; stopping=da6aa2204f50,34361,1733406278491; zookeeper connection closed. 
2024-12-05T13:44:54,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@405f2a2e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@405f2a2e 2024-12-05T13:44:55,109 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:44:55,110 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53513, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:44:55,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-05T13:44:55,130 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, size=2.7 K (2747bytes) 2024-12-05T13:44:55,130 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta 2024-12-05T13:44:55,130 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta after 0ms 2024-12-05T13:44:55,133 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:55,133 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta took 3ms 2024-12-05T13:44:55,139 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 1588230740: last_flushed_sequence_id: 18446744073709551615 store_sequence_id { family_name: "info" sequence_id: 5 } store_sequence_id { family_name: "ns" sequence_id: 3 } store_sequence_id { family_name: "rep_barrier" sequence_id: 18446744073709551615 } store_sequence_id { family_name: "table" sequence_id: 6 } 2024-12-05T13:44:55,140 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for 
hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta so closing down
2024-12-05T13:44:55,140 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-12-05T13:44:55,141 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp
2024-12-05T13:44:55,142 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp
2024-12-05T13:44:55,143 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished
2024-12-05T13:44:55,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741892_1070 (size=2527)
2024-12-05T13:44:55,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741892_1070 (size=2527)
2024-12-05T13:44:55,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741892_1070 (size=2527)
2024-12-05T13:44:55,150 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms)
2024-12-05T13:44:55,151 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018
2024-12-05T13:44:55,151 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 16 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, size=2.7 K, length=2747, corrupted=false, cancelled=false
2024-12-05T13:44:55,152 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, journal:
  Splitting hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, size=2.7 K (2747bytes) at 1733406295130
  Finishing writing output for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta so closing down at 1733406295140 (+10 ms)
  Creating recovered edits writer path=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp at 1733406295142 (+2 ms)
  3 split writer threads finished at 1733406295143 (+1 ms)
  Closed recovered edits writer path=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733406295150 (+7 ms)
  Rename recovered edits hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta.temp to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 at 1733406295151 (+1 ms)
  Processed 16 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, size=2.7 K, length=2747, corrupted=false, cancelled=false at 1733406295152 (+1 ms)
2024-12-05T13:44:55,152 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta
2024-12-05T13:44:55,153 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-12-05T13:44:55,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(4169): Remote procedure done, pid=15
2024-12-05T13:44:55,159 INFO [PEWorker-3 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs
2024-12-05T13:44:55,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14
2024-12-05T13:44:55,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, worker=da6aa2204f50,42407,1733406278528 in 209 msec
2024-12-05T13:44:55,164 DEBUG [PEWorker-1 {}] master.SplitWALManager(172): Release split WAL worker=da6aa2204f50,42407,1733406278528
2024-12-05T13:44:55,168 INFO [PEWorker-1 {}]
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-05T13:44:55,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure da6aa2204f50%2C34361%2C1733406278491.meta.1733406280381.meta, worker=da6aa2204f50,42407,1733406278528 in 221 msec 2024-12-05T13:44:55,169 INFO [PEWorker-5 {}] master.SplitLogManager(171): hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting dir is empty, no logs to split. 2024-12-05T13:44:55,169 INFO [PEWorker-5 {}] master.SplitWALManager(105): da6aa2204f50,34361,1733406278491 WAL count=0, meta=true 2024-12-05T13:44:55,170 DEBUG [PEWorker-5 {}] procedure.ServerCrashProcedure(329): Check if da6aa2204f50,34361,1733406278491 WAL splitting is done? wals=0, meta=true 2024-12-05T13:44:55,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T13:44:55,173 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:44:55,174 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-05T13:44:55,237 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:55,239 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1] 2024-12-05T13:44:55,239 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.AbstractRpcClient(357): Not trying to connect to da6aa2204f50:34361 this server is in the failed servers list 2024-12-05T13:44:55,240 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=da6aa2204f50:34361 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:55,240 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:55,240 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 from cache 2024-12-05T13:44:55,324 DEBUG [da6aa2204f50:39625 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, 
allServersCount=2 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(204): Hosts are {da6aa2204f50=0} racks are {/default-rack=0} 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T13:44:55,325 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T13:44:55,325 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T13:44:55,325 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T13:44:55,326 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:55,327 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,42407,1733406278528, state=OPENING 2024-12-05T13:44:55,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:55,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:55,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:55,337 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:44:55,338 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:55,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,42407,1733406278528}] 2024-12-05T13:44:55,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:44:55,338 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:44:55,495 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T13:44:55,495 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:55,496 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T13:44:55,498 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C42407%2C1733406278528.meta, suffix=.meta, logDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs, maxLogs=32 2024-12-05T13:44:55,511 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528/da6aa2204f50%2C42407%2C1733406278528.meta.1733406295499.meta, exclude list is [], retry=0 2024-12-05T13:44:55,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:55,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:55,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:55,515 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,42407,1733406278528/da6aa2204f50%2C42407%2C1733406278528.meta.1733406295499.meta 2024-12-05T13:44:55,516 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:55,516 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:55,516 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T13:44:55,516 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T13:44:55,517 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T13:44:55,517 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T13:44:55,517 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:55,517 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T13:44:55,517 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T13:44:55,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T13:44:55,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T13:44:55,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:55,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:55,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T13:44:55,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-12-05T13:44:55,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:55,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:55,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T13:44:55,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T13:44:55,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:55,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:55,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T13:44:55,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T13:44:55,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:55,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:44:55,524 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T13:44:55,525 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:55,526 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740 2024-12-05T13:44:55,527 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-05T13:44:55,529 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:55,531 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5793): Applied 40, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=18, path=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-05T13:44:55,531 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.67 KB heapSize=9.66 KB 2024-12-05T13:44:55,548 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/0795a6e888f04158ad3210b8350f70dc is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff./info:regioninfo/1733406294538/Put/seqid=0 2024-12-05T13:44:55,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741894_1072 (size=11177) 2024-12-05T13:44:55,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741894_1072 (size=11177) 2024-12-05T13:44:55,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741894_1072 (size=11177) 2024-12-05T13:44:55,556 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.46 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/0795a6e888f04158ad3210b8350f70dc 2024-12-05T13:44:55,574 DEBUG 
[RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/ns/88db7e5db22d4b5a85fa75b041d923d7 is 43, key is default/ns:d/1733406280620/Put/seqid=0 2024-12-05T13:44:55,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741895_1073 (size=5153) 2024-12-05T13:44:55,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741895_1073 (size=5153) 2024-12-05T13:44:55,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741895_1073 (size=5153) 2024-12-05T13:44:55,581 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/ns/88db7e5db22d4b5a85fa75b041d923d7 2024-12-05T13:44:55,599 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/table/329a41f48333420486333cfe5b0283c9 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733406292779/Put/seqid=0 2024-12-05T13:44:55,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741896_1074 (size=5431) 2024-12-05T13:44:55,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741896_1074 (size=5431) 2024-12-05T13:44:55,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741896_1074 (size=5431) 2024-12-05T13:44:55,606 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/table/329a41f48333420486333cfe5b0283c9 2024-12-05T13:44:55,612 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/0795a6e888f04158ad3210b8350f70dc as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/info/0795a6e888f04158ad3210b8350f70dc 2024-12-05T13:44:55,618 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/info/0795a6e888f04158ad3210b8350f70dc, entries=36, sequenceid=18, filesize=10.9 K 2024-12-05T13:44:55,620 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/ns/88db7e5db22d4b5a85fa75b041d923d7 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/ns/88db7e5db22d4b5a85fa75b041d923d7 2024-12-05T13:44:55,626 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/ns/88db7e5db22d4b5a85fa75b041d923d7, entries=2, sequenceid=18, filesize=5.0 K 2024-12-05T13:44:55,627 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/table/329a41f48333420486333cfe5b0283c9 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/table/329a41f48333420486333cfe5b0283c9 2024-12-05T13:44:55,633 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/table/329a41f48333420486333cfe5b0283c9, entries=2, sequenceid=18, filesize=5.3 K 2024-12-05T13:44:55,633 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=18, compaction requested=false; wal=null 2024-12-05T13:44:55,634 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-05T13:44:55,635 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T13:44:55,636 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T13:44:55,636 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T13:44:55,638 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-05T13:44:55,639 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1
2024-12-05T13:44:55,641 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=19; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59777264, jitterRate=-0.10924935340881348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-05T13:44:55,641 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-05T13:44:55,641 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1006): Region open journal for 1588230740:
  Running coprocessor pre-open hook at 1733406295517
  Writing region info on filesystem at 1733406295517
  Initializing all the Stores at 1733406295518 (+1 ms)
  Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406295518
  Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406295518
  Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406295518
  Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406295518
  Obtaining lock to block concurrent updates at 1733406295531 (+13 ms)
  Preparing flush snapshotting stores in 1588230740 at 1733406295531
  Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=5811, getHeapSize=9832, getOffHeapSize=0, getCellsCount=40 at 1733406295531
  Flushing stores of hbase:meta,,1.1588230740 at 1733406295531
  Flushing 1588230740/info: creating writer at 1733406295532 (+1 ms)
  Flushing 1588230740/info: appending metadata at 1733406295548 (+16 ms)
  Flushing 1588230740/info: closing flushed file at 1733406295548
  Flushing 1588230740/ns: creating writer at 1733406295562 (+14 ms)
  Flushing 1588230740/ns: appending metadata at 1733406295573 (+11 ms)
  Flushing 1588230740/ns: closing flushed file at 1733406295573
  Flushing 1588230740/table: creating writer at 1733406295586 (+13 ms)
  Flushing 1588230740/table: appending metadata at 1733406295598 (+12 ms)
  Flushing 1588230740/table: closing flushed file at 1733406295598
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7527a2a7: reopening flushed file at 1733406295611 (+13 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63898504: reopening flushed file at 1733406295618 (+7 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79563c6e: reopening flushed file at 1733406295626 (+8 ms)
  Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=18, compaction requested=false; wal=null at 1733406295633 (+7 ms)
  Cleaning up temporary data from old regions at 1733406295636 (+3 ms)
  Running coprocessor post-open hooks at 1733406295641 (+5 ms)
  Region opened successfully at 1733406295641
2024-12-05T13:44:55,642 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=17, masterSystemTime=1733406295490
2024-12-05T13:44:55,645 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-05T13:44:55,645 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-05T13:44:55,645 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=19, regionLocation=da6aa2204f50,42407,1733406278528
2024-12-05T13:44:55,646 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,42407,1733406278528, state=OPEN
2024-12-05T13:44:55,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-05T13:44:55,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-05T13:44:55,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-05T13:44:55,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-05T13:44:55,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-05T13:44:55,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-05T13:44:55,654 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=17, ppid=16,
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:55,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-05T13:44:55,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,42407,1733406278528 in 317 msec 2024-12-05T13:44:55,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-05T13:44:55,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 485 msec 2024-12-05T13:44:55,660 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(207): da6aa2204f50,34361,1733406278491 had 2 regions 2024-12-05T13:44:55,661 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure da6aa2204f50,34361,1733406278491, splitWal=true, meta=true, isMeta: false 2024-12-05T13:44:55,663 INFO [PEWorker-4 {}] master.SplitWALManager(105): da6aa2204f50,34361,1733406278491 WAL count=1, meta=false 2024-12-05T13:44:55,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure da6aa2204f50%2C34361%2C1733406278491.1733406279949}] 2024-12-05T13:44:55,665 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:55,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure da6aa2204f50%2C34361%2C1733406278491.1733406279949, worker=da6aa2204f50,42407,1733406278528}] 2024-12-05T13:44:55,747 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:55,748 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,42407,1733406278528, seqNum=-1] 2024-12-05T13:44:55,749 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:44:55,752 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:44:55,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=19 2024-12-05T13:44:55,835 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949, size=1.6 K (1677bytes) 2024-12-05T13:44:55,835 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949
2024-12-05T13:44:55,836 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949 after 1ms
2024-12-05T13:44:55,838 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null
2024-12-05T13:44:55,839 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(310): Open hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949 took 4ms
2024-12-05T13:44:55,844 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(352): Last flushed sequenceid for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 }
2024-12-05T13:44:55,844 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949 so closing down
2024-12-05T13:44:55,844 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-12-05T13:44:55,848 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(145): 3 split writer threads finished
2024-12-05T13:44:55,848 INFO [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 7 ms; skipped=6; WAL=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949, size=1.6 K, length=1677, corrupted=false, cancelled=false
2024-12-05T13:44:55,848 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949, journal:
  Splitting hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949, size=1.6 K (1677bytes) at 1733406295835
  Finishing writing output for hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949 so closing down at 1733406295844 (+9 ms)
  3 split writer threads finished at 1733406295848 (+4 ms)
  Processed 6 edits across 0 Regions in 7 ms; skipped=6; WAL=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949, size=1.6 K, length=1677, corrupted=false, cancelled=false at 1733406295848
2024-12-05T13:44:55,848 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949
2024-12-05T13:44:55,848 DEBUG [RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-05T13:44:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster(4169): Remote procedure done, pid=19
2024-12-05T13:44:55,853 INFO [PEWorker-1 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting/da6aa2204f50%2C34361%2C1733406278491.1733406279949 to hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs
2024-12-05T13:44:55,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18
2024-12-05T13:44:55,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure da6aa2204f50%2C34361%2C1733406278491.1733406279949, worker=da6aa2204f50,42407,1733406278528 in 187 msec
2024-12-05T13:44:55,859 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=da6aa2204f50,42407,1733406278528
2024-12-05T13:44:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=18, resume processing ppid=13
2024-12-05T13:44:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure da6aa2204f50%2C34361%2C1733406278491.1733406279949, worker=da6aa2204f50,42407,1733406278528 in 196 msec
2024-12-05T13:44:55,866 INFO [PEWorker-4 {}] master.SplitLogManager(171): hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/WALs/da6aa2204f50,34361,1733406278491-splitting dir is empty, no logs to split.
2024-12-05T13:44:55,866 INFO [PEWorker-4 {}] master.SplitWALManager(105): da6aa2204f50,34361,1733406278491 WAL count=0, meta=false
2024-12-05T13:44:55,866 DEBUG [PEWorker-4 {}] procedure.ServerCrashProcedure(329): Check if da6aa2204f50,34361,1733406278491 WAL splitting is done?
wals=0, meta=false 2024-12-05T13:44:55,868 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.AbstractRpcClient(357): Not trying to connect to da6aa2204f50:34361 this server is in the failed servers list 2024-12-05T13:44:55,868 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=12, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=da6aa2204f50:34361 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:55,868 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=12 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:55,868 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=12 from cache 2024-12-05T13:44:55,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN}] 2024-12-05T13:44:55,872 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN 2024-12-05T13:44:55,873 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-05T13:44:56,024 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(204): Hosts are {da6aa2204f50=0} racks are {/default-rack=0} 2024-12-05T13:44:56,025 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T13:44:56,025 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T13:44:56,025 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T13:44:56,025 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T13:44:56,025 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T13:44:56,025 INFO [da6aa2204f50:39625 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T13:44:56,025 DEBUG [da6aa2204f50:39625 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-12-05T13:44:56,025 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPENING, regionLocation=da6aa2204f50,42407,1733406278528
2024-12-05T13:44:56,027 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server da6aa2204f50:34361
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:34361
Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused
    at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T13:44:56,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1, error=java.net.ConnectException: Call to address=da6aa2204f50:34361 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:34361
2024-12-05T13:44:56,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 is java.net.ConnectException: finishConnect(..)
failed: Connection refused 2024-12-05T13:44:56,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,34361,1733406278491, seqNum=-1 from cache 2024-12-05T13:44:56,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.FailedServers(52): Added failed server with address da6aa2204f50:34361 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:34361 2024-12-05T13:44:56,083 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=18] 2024-12-05T13:44:56,083 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.AbstractRpcClient(357): Not trying to connect to da6aa2204f50:34361 this server is in the failed servers list 2024-12-05T13:44:56,084 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=da6aa2204f50:34361 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:56,084 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: da6aa2204f50:34361 2024-12-05T13:44:56,084 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,34361,1733406278491, seqNum=18 from cache 2024-12-05T13:44:56,137 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:44:56,137 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,42407,1733406278528, seqNum=-1] 2024-12-05T13:44:56,138 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:44:56,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:44:56,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN because future has completed 2024-12-05T13:44:56,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,42407,1733406278528}] 2024-12-05T13:44:56,303 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:56,303 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:56,304 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,304 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:56,304 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,304 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,306 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,307 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf1 2024-12-05T13:44:56,307 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,315 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1/327c8da8c58047c6850e145bd74b347e 2024-12-05T13:44:56,315 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,315 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,316 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff columnFamilyName cf2 2024-12-05T13:44:56,316 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,324 DEBUG [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2/7172fca40fd54de985aee82179f2bce7 2024-12-05T13:44:56,324 INFO [StoreOpener-2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff-1 {}] regionserver.HStore(327): Store=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,325 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,325 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,327 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,327 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] 
regionserver.HRegion(1048): stopping wal replay for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,328 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,328 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-05T13:44:56,329 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,330 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74703579, jitterRate=0.1131700724363327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-05T13:44:56,330 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:44:56,331 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Running coprocessor pre-open hook at 1733406296304Writing region info on filesystem at 1733406296304Initializing all the Stores at 1733406296305 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296305Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296306 (+1 ms)Cleaning up temporary data from old regions at 1733406296328 (+22 ms)Running coprocessor post-open hooks at 1733406296330 (+2 ms)Region opened successfully at 1733406296331 (+1 ms) 2024-12-05T13:44:56,333 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., pid=21, masterSystemTime=1733406296295 2024-12-05T13:44:56,335 DEBUG [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
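The entries above show region 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff being re-opened on da6aa2204f50,42407,1733406278528 with next sequenceid=18 once the crashed server's WAL had been split. Below is a minimal client-side sketch of how the new location could be confirmed and a row re-read after such a move; the table name, row key 'r1' and family cf1 are taken from the log, while the connection setup and class name are assumptions and this is not the test's own verification code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyRegionMoved {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        byte[] row = Bytes.toBytes("r1");                    // row key seen in the locator log lines

        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(tn);
             Table table = conn.getTable(tn)) {
          // reload=true bypasses the client-side location cache, mirroring the
          // "Try removing ... from cache" / re-fetch behaviour in the log.
          HRegionLocation loc = locator.getRegionLocation(row, true);
          System.out.println("region now served by " + loc.getServerName());

          // A read after the move should succeed against the new location.
          Result r = table.get(new Get(row).addFamily(Bytes.toBytes("cf1")));
          System.out.println("cf1 cells for r1: " + r.size());
        }
      }
    }

Passing reload=true to getRegionLocation forces the cache refresh that the AsyncRegionLocatorHelper lines above perform automatically after the connection failures against the dead server.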
2024-12-05T13:44:56,336 INFO [RS_OPEN_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:44:56,336 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, regionState=OPEN, openSeqNum=18, regionLocation=da6aa2204f50,42407,1733406278528 2024-12-05T13:44:56,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,42407,1733406278528 because future has completed 2024-12-05T13:44:56,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=20 2024-12-05T13:44:56,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=20, state=SUCCESS, hasLock=false; OpenRegionProcedure 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, server=da6aa2204f50,42407,1733406278528 in 198 msec 2024-12-05T13:44:56,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=13 2024-12-05T13:44:56,346 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(291): removed crashed server da6aa2204f50,34361,1733406278491 after splitting done 2024-12-05T13:44:56,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, ASSIGN in 473 msec 2024-12-05T13:44:56,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure da6aa2204f50,34361,1733406278491, splitWal=true, meta=true in 1.4140 sec 2024-12-05T13:44:56,392 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff., hostname=da6aa2204f50,42407,1733406278528, seqNum=18] 2024-12-05T13:44:56,406 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterRegionMovedWithMultiCF Thread=413 (was 415), OpenFileDescriptor=1050 (was 1022) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=327 (was 347), ProcessCount=11 (was 11), AvailableMemoryMB=8345 (was 8556) 2024-12-05T13:44:56,408 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1050 is superior to 1024 2024-12-05T13:44:56,419 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterPartialFlush Thread=413, OpenFileDescriptor=1050, MaxFileDescriptor=1048576, SystemLoadAverage=327, ProcessCount=11, AvailableMemoryMB=8344 2024-12-05T13:44:56,419 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1050 is superior to 1024 2024-12-05T13:44:56,432 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:56,434 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:56,434 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:56,437 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-22702140, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-22702140, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:56,448 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-22702140/hregion-22702140.1733406296438, exclude list is [], retry=0 2024-12-05T13:44:56,451 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:56,452 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:56,452 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:56,454 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-22702140/hregion-22702140.1733406296438 2024-12-05T13:44:56,454 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:56,454 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => cecbb0b7682f5cdcf50308fa1b22e7de, NAME => 'testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:56,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741898_1076 (size=67) 2024-12-05T13:44:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741898_1076 (size=67) 2024-12-05T13:44:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741898_1076 (size=67) 2024-12-05T13:44:56,465 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:56,466 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,467 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName a 2024-12-05T13:44:56,468 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,468 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,468 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,469 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName b 2024-12-05T13:44:56,469 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,470 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,470 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,471 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName c 2024-12-05T13:44:56,471 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,471 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,471 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,472 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,472 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,473 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,473 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,474 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:56,475 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,477 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:56,478 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cecbb0b7682f5cdcf50308fa1b22e7de; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74597383, jitterRate=0.11158762872219086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cecbb0b7682f5cdcf50308fa1b22e7de: Writing region info on filesystem at 1733406296465Initializing all the Stores at 1733406296465Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296466 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296466Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296466Cleaning up temporary data from old regions at 1733406296473 (+7 ms)Region opened successfully at 1733406296478 (+5 ms) 2024-12-05T13:44:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing cecbb0b7682f5cdcf50308fa1b22e7de, disabling compactions & flushes 2024-12-05T13:44:56,478 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,478 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,479 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 
after waiting 0 ms 2024-12-05T13:44:56,479 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,479 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,479 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for cecbb0b7682f5cdcf50308fa1b22e7de: Waiting for close lock at 1733406296478Disabling compacts and flushes for region at 1733406296478Disabling writes for close at 1733406296479 (+1 ms)Writing region close event to WAL at 1733406296479Closed at 1733406296479 2024-12-05T13:44:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741897_1075 (size=93) 2024-12-05T13:44:56,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741897_1075 (size=93) 2024-12-05T13:44:56,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741897_1075 (size=93) 2024-12-05T13:44:56,484 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:56,484 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-22702140:(num 1733406296438) 2024-12-05T13:44:56,484 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:56,485 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:56,498 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, exclude list is [], retry=0 2024-12-05T13:44:56,501 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:56,501 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:56,501 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:56,503 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 2024-12-05T13:44:56,503 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:56,504 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => cecbb0b7682f5cdcf50308fa1b22e7de, NAME => 'testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:56,504 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:56,504 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,504 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,506 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,506 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName a 2024-12-05T13:44:56,506 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,507 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,507 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,508 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName b 2024-12-05T13:44:56,508 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,508 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,508 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,509 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName c 2024-12-05T13:44:56,509 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,509 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,509 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,510 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,510 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,511 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,511 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,512 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
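The FlushLargeStoresPolicy lines in this run are internally consistent: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound falls back to the region memstore flush size divided by the number of families, as the message itself states, giving 134217728/3 = 44739242 (the "42.7 M" and flushSizeLowerBound=44739242 here) for this three-family table and 134217728/2 = 67108864 ("64.0 M") for the two-family table earlier. As a hedged sketch, the three-family schema dumped in the open journal (families a, b, c, VERSIONS=1, BLOOMFILTER=ROW, defaults elsewhere) could be declared through the public descriptor builders roughly as follows; the class and method names of the helper are assumptions, only the schema values come from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BuildTestTableDescriptor {
      // Mirrors the schema dumped in the region open journal: families a, b, c
      // with VERSIONS=1, BLOOMFILTER=ROW and otherwise default settings.
      static TableDescriptor threeFamilyDescriptor() {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
        for (String family : new String[] { "a", "b", "c" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build());
        }
        return builder.build();
      }
    }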
2024-12-05T13:44:56,513 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,513 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cecbb0b7682f5cdcf50308fa1b22e7de; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67257228, jitterRate=0.0022107958793640137}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:56,514 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cecbb0b7682f5cdcf50308fa1b22e7de: Writing region info on filesystem at 1733406296504Initializing all the Stores at 1733406296505 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296505Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296505Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296505Cleaning up temporary data from old regions at 1733406296511 (+6 ms)Region opened successfully at 1733406296514 (+3 ms) 2024-12-05T13:44:56,540 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cecbb0b7682f5cdcf50308fa1b22e7de 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-05T13:44:56,561 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/a/a88839a1120d43c39a6eabc0362df093 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733406296514/Put/seqid=0 2024-12-05T13:44:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741900_1078 (size=5958) 2024-12-05T13:44:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741900_1078 (size=5958) 2024-12-05T13:44:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741900_1078 (size=5958) 2024-12-05T13:44:56,568 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/a/a88839a1120d43c39a6eabc0362df093 2024-12-05T13:44:56,587 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/50af583f62a843e49e6034a2b1bef3ee is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733406296523/Put/seqid=0 2024-12-05T13:44:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741901_1079 (size=5958) 2024-12-05T13:44:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741901_1079 (size=5958) 2024-12-05T13:44:56,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741901_1079 (size=5958) 2024-12-05T13:44:56,594 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/50af583f62a843e49e6034a2b1bef3ee 2024-12-05T13:44:56,613 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/c/3ec7c0572c054659b5ec9bd6feee2642 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733406296532/Put/seqid=0 2024-12-05T13:44:56,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741902_1080 (size=5958) 2024-12-05T13:44:56,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741902_1080 (size=5958) 2024-12-05T13:44:56,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741902_1080 (size=5958) 2024-12-05T13:44:56,620 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/c/3ec7c0572c054659b5ec9bd6feee2642 2024-12-05T13:44:56,626 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/a/a88839a1120d43c39a6eabc0362df093 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/a/a88839a1120d43c39a6eabc0362df093 2024-12-05T13:44:56,631 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/a/a88839a1120d43c39a6eabc0362df093, entries=10, sequenceid=33, filesize=5.8 K 2024-12-05T13:44:56,632 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/50af583f62a843e49e6034a2b1bef3ee as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/50af583f62a843e49e6034a2b1bef3ee 2024-12-05T13:44:56,638 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/50af583f62a843e49e6034a2b1bef3ee, entries=10, sequenceid=33, filesize=5.8 K 2024-12-05T13:44:56,639 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/c/3ec7c0572c054659b5ec9bd6feee2642 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/c/3ec7c0572c054659b5ec9bd6feee2642 2024-12-05T13:44:56,645 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/c/3ec7c0572c054659b5ec9bd6feee2642, entries=10, sequenceid=33, filesize=5.8 K 2024-12-05T13:44:56,646 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for cecbb0b7682f5cdcf50308fa1b22e7de in 106ms, sequenceid=33, compaction requested=false 2024-12-05T13:44:56,646 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cecbb0b7682f5cdcf50308fa1b22e7de: 2024-12-05T13:44:56,647 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing cecbb0b7682f5cdcf50308fa1b22e7de, disabling compactions & flushes 2024-12-05T13:44:56,647 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,647 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,647 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. after waiting 0 ms 2024-12-05T13:44:56,647 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 2024-12-05T13:44:56,659 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 
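The flush recorded above writes one ~5.8 K HFile per family at sequenceid=33 before the region is closed. The test drives this through HRegion directly; as a sketch only, an equivalent flush requested from the client side would go through the Admin API (connection setup and class name are assumptions, not what the test does):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumed client configuration
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes every store of every region of the table; each column family's
          // memstore is written out as its own HFile, as in the log above.
          admin.flush(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
        }
      }
    }

This lines up with the later entries: after the b store file 50af583f62a843e49e6034a2b1bef3ee is archived and wal.1733406296485 is split into recovered.edits/0000000000000000032, the re-open applies 10 edits and skips 20, and the follow-up flush is for family b, whose store file had been removed.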
2024-12-05T13:44:56,660 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for cecbb0b7682f5cdcf50308fa1b22e7de: Waiting for close lock at 1733406296646Disabling compacts and flushes for region at 1733406296646Disabling writes for close at 1733406296647 (+1 ms)Writing region close event to WAL at 1733406296659 (+12 ms)Closed at 1733406296659 2024-12-05T13:44:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741899_1077 (size=2873) 2024-12-05T13:44:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741899_1077 (size=2873) 2024-12-05T13:44:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741899_1077 (size=2873) 2024-12-05T13:44:56,670 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/50af583f62a843e49e6034a2b1bef3ee to hdfs://localhost:34105/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/50af583f62a843e49e6034a2b1bef3ee 2024-12-05T13:44:56,685 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, size=2.8 K (2873bytes) 2024-12-05T13:44:56,685 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 2024-12-05T13:44:56,686 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 after 1ms 2024-12-05T13:44:56,688 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:56,689 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 took 4ms 2024-12-05T13:44:56,692 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 so closing down 2024-12-05T13:44:56,692 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:44:56,694 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733406296485.temp 2024-12-05T13:44:56,696 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp 2024-12-05T13:44:56,696 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:44:56,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38521 is added to blk_1073741903_1081 (size=2312) 2024-12-05T13:44:56,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741903_1081 (size=2312) 2024-12-05T13:44:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741903_1081 (size=2312) 2024-12-05T13:44:56,721 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-05T13:44:56,724 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 2024-12-05T13:44:56,724 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 34 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, size=2.8 K, length=2873, corrupted=false, cancelled=false 2024-12-05T13:44:56,724 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, journal: Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, size=2.8 K (2873bytes) at 1733406296685Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 so closing down at 1733406296692 (+7 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp at 1733406296696 (+4 ms)3 split writer threads finished at 1733406296696Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733406296721 (+25 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000003-wal.1733406296485.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 at 1733406296724 (+3 ms)Processed 32 edits across 1 Regions in 34 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485, size=2.8 K, length=2873, corrupted=false, cancelled=false at 1733406296724 2024-12-05T13:44:56,727 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296485 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406296485 2024-12-05T13:44:56,728 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 2024-12-05T13:44:56,729 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:56,730 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:56,749 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296731, exclude list is [], retry=0 2024-12-05T13:44:56,752 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:56,754 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:56,755 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:56,759 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296731 2024-12-05T13:44:56,759 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:56,759 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => cecbb0b7682f5cdcf50308fa1b22e7de, NAME => 'testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:56,760 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:56,760 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,760 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,762 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,763 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName a 2024-12-05T13:44:56,763 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,772 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/a/a88839a1120d43c39a6eabc0362df093 2024-12-05T13:44:56,772 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,773 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,775 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName b 2024-12-05T13:44:56,775 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,777 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,777 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,779 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cecbb0b7682f5cdcf50308fa1b22e7de columnFamilyName c 2024-12-05T13:44:56,779 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,788 DEBUG [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/c/3ec7c0572c054659b5ec9bd6feee2642 2024-12-05T13:44:56,788 INFO [StoreOpener-cecbb0b7682f5cdcf50308fa1b22e7de-1 {}] regionserver.HStore(327): Store=cecbb0b7682f5cdcf50308fa1b22e7de/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,788 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,790 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,793 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,795 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 2024-12-05T13:44:56,798 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:44:56,799 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 2024-12-05T13:44:56,800 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cecbb0b7682f5cdcf50308fa1b22e7de 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-05T13:44:56,818 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/41af4cdf95da472dbd372401b7f17324 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733406296523/Put/seqid=0 2024-12-05T13:44:56,830 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741905_1083 (size=5958) 2024-12-05T13:44:56,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741905_1083 (size=5958) 2024-12-05T13:44:56,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741905_1083 (size=5958) 2024-12-05T13:44:56,831 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/41af4cdf95da472dbd372401b7f17324 2024-12-05T13:44:56,837 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/.tmp/b/41af4cdf95da472dbd372401b7f17324 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/41af4cdf95da472dbd372401b7f17324 2024-12-05T13:44:56,843 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/b/41af4cdf95da472dbd372401b7f17324, entries=10, sequenceid=32, filesize=5.8 K 2024-12-05T13:44:56,844 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for cecbb0b7682f5cdcf50308fa1b22e7de in 44ms, sequenceid=32, compaction requested=false; wal=null 2024-12-05T13:44:56,845 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/0000000000000000032 2024-12-05T13:44:56,846 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,846 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,847 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
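
[Editor's note] The entries above trace the recovered-edits replay path for region cecbb0b7682f5cdcf50308fa1b22e7de: edits whose sequence ids are already covered by a previous flush are skipped, newer edits are re-applied to the memstore ("Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32"), the memstore is flushed to a new store file, and the recovered.edits file is deleted. The following is a minimal, self-contained sketch of that skip-or-apply decision; RecoveredEdit, Memstore, and replayRecoveredEdits are hypothetical stand-ins and not HBase classes, and the flushed sequence id of 22 is an assumed value chosen only so the output reproduces the 10/20 split reported in the log.

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of the replay decision seen in the log:
// edits already covered by an earlier flush are skipped, the rest are re-applied.
public class RecoveredEditsReplaySketch {

    // Hypothetical stand-in for one WAL edit carried in a recovered.edits file.
    record RecoveredEdit(long seqId, String row, String value) {}

    // Hypothetical in-memory store that collects re-applied edits until flushed.
    static class Memstore {
        final List<RecoveredEdit> pending = new ArrayList<>();
        void apply(RecoveredEdit e) { pending.add(e); }
        int flush() {                       // pretend this writes a store file
            int n = pending.size();
            pending.clear();
            return n;
        }
    }

    // Replays edits newer than maxFlushedSeqId, mirroring
    // "Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32".
    static long replayRecoveredEdits(List<RecoveredEdit> edits,
                                     long maxFlushedSeqId,
                                     Memstore memstore) {
        long applied = 0, skipped = 0, maxSeqId = maxFlushedSeqId;
        for (RecoveredEdit e : edits) {
            if (e.seqId() <= maxFlushedSeqId) {
                skipped++;                  // already persisted by an earlier flush
            } else {
                memstore.apply(e);
                applied++;
            }
            maxSeqId = Math.max(maxSeqId, e.seqId());
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
                applied, skipped, maxSeqId);
        return maxSeqId;
    }

    public static void main(String[] args) {
        List<RecoveredEdit> edits = new ArrayList<>();
        for (long seq = 3; seq <= 32; seq++) {          // 30 edits, seq ids 3..32
            edits.add(new RecoveredEdit(seq, "row" + seq, "v" + seq));
        }
        Memstore memstore = new Memstore();
        long maxSeqId = replayRecoveredEdits(edits, 22L, memstore); // 22 is an assumed flushed seq id
        int flushed = memstore.flush();
        System.out.printf("Flushed %d edits at sequenceid=%d%n", flushed, maxSeqId);
    }
}

Running the sketch prints "Applied 10, skipped 20" and then "Flushed 10 edits at sequenceid=32", matching the counts in the log entries above, after which the real code path deletes the replayed recovered.edits file as shown by HRegion(5420).
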
2024-12-05T13:44:56,849 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cecbb0b7682f5cdcf50308fa1b22e7de 2024-12-05T13:44:56,851 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/cecbb0b7682f5cdcf50308fa1b22e7de/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-05T13:44:56,852 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cecbb0b7682f5cdcf50308fa1b22e7de; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62742162, jitterRate=-0.06506893038749695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:56,853 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cecbb0b7682f5cdcf50308fa1b22e7de: Writing region info on filesystem at 1733406296760Initializing all the Stores at 1733406296761 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296761Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296761Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296761Obtaining lock to block concurrent updates at 1733406296800 (+39 ms)Preparing flush snapshotting stores in cecbb0b7682f5cdcf50308fa1b22e7de at 1733406296800Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733406296800Flushing stores of testReplayEditsWrittenViaHRegion,,1733406296432.cecbb0b7682f5cdcf50308fa1b22e7de. 
at 1733406296800Flushing cecbb0b7682f5cdcf50308fa1b22e7de/b: creating writer at 1733406296800Flushing cecbb0b7682f5cdcf50308fa1b22e7de/b: appending metadata at 1733406296817 (+17 ms)Flushing cecbb0b7682f5cdcf50308fa1b22e7de/b: closing flushed file at 1733406296818 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e8d286b: reopening flushed file at 1733406296836 (+18 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for cecbb0b7682f5cdcf50308fa1b22e7de in 44ms, sequenceid=32, compaction requested=false; wal=null at 1733406296844 (+8 ms)Cleaning up temporary data from old regions at 1733406296846 (+2 ms)Region opened successfully at 1733406296853 (+7 ms) 2024-12-05T13:44:56,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterPartialFlush Thread=420 (was 413) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:35052 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:33228 [Waiting for operation #39] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:53062 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:34876 [Waiting for operation #48] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:33356 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:52872 [Waiting for operation #36] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1124 (was 1050) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=327 (was 327), ProcessCount=11 (was 11), AvailableMemoryMB=8323 (was 8344) 2024-12-05T13:44:56,876 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1124 is superior to 1024 2024-12-05T13:44:56,891 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterAbortingFlush Thread=420, OpenFileDescriptor=1124, MaxFileDescriptor=1048576, SystemLoadAverage=327, ProcessCount=11, AvailableMemoryMB=8322 2024-12-05T13:44:56,891 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1124 is superior to 1024 2024-12-05T13:44:56,909 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:56,911 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:44:56,912 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:44:56,914 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-37916741, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-37916741, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:56,925 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-37916741/hregion-37916741.1733406296915, exclude list is [], retry=0 2024-12-05T13:44:56,928 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:56,928 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:56,928 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:56,931 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-37916741/hregion-37916741.1733406296915 2024-12-05T13:44:56,935 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:44:56,935 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 50341aac79fe297d81d88ea21e446916, NAME => 'testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:44:56,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741907_1085 (size=68) 2024-12-05T13:44:56,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741907_1085 (size=68) 2024-12-05T13:44:56,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741907_1085 (size=68) 2024-12-05T13:44:56,950 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:56,951 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,953 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName a 2024-12-05T13:44:56,953 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,954 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,954 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,956 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName b 2024-12-05T13:44:56,956 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,957 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,957 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,959 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName c 2024-12-05T13:44:56,959 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:56,960 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:56,960 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,961 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,961 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,963 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,963 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,963 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:56,965 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:56,967 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:44:56,967 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 50341aac79fe297d81d88ea21e446916; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67841372, jitterRate=0.010915219783782959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:56,968 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 50341aac79fe297d81d88ea21e446916: Writing region info on filesystem at 1733406296950Initializing all the Stores at 1733406296951 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296951Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296951Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406296951Cleaning up temporary data from old regions at 1733406296963 (+12 ms)Region opened successfully at 1733406296968 (+5 ms) 2024-12-05T13:44:56,968 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 50341aac79fe297d81d88ea21e446916, disabling compactions & flushes 2024-12-05T13:44:56,968 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:44:56,968 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:44:56,968 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 
after waiting 0 ms 2024-12-05T13:44:56,969 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:44:56,969 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:44:56,969 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 50341aac79fe297d81d88ea21e446916: Waiting for close lock at 1733406296968Disabling compacts and flushes for region at 1733406296968Disabling writes for close at 1733406296969 (+1 ms)Writing region close event to WAL at 1733406296969Closed at 1733406296969 2024-12-05T13:44:56,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741906_1084 (size=93) 2024-12-05T13:44:56,972 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-37916741/hregion-37916741.1733406296915 not finished, retry = 0 2024-12-05T13:44:56,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741906_1084 (size=93) 2024-12-05T13:44:56,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741906_1084 (size=93) 2024-12-05T13:44:57,075 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:44:57,075 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-37916741:(num 1733406296915) 2024-12-05T13:44:57,075 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:44:57,077 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:44:57,089 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, exclude list is [], retry=0 2024-12-05T13:44:57,092 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:44:57,093 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:44:57,093 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:44:57,096 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 2024-12-05T13:44:57,096 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new 
AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:44:57,160 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 50341aac79fe297d81d88ea21e446916, NAME => 'testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:44:57,163 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,163 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:44:57,163 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,163 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,165 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,166 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName a 2024-12-05T13:44:57,166 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:57,167 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:57,167 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,168 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle 
point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName b 2024-12-05T13:44:57,168 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:57,169 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:57,169 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,170 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName c 2024-12-05T13:44:57,170 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:44:57,171 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:44:57,171 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,172 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,174 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,175 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,175 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,176 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:44:57,177 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,178 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 50341aac79fe297d81d88ea21e446916; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69160570, jitterRate=0.0305728018283844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:44:57,178 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:44:57,179 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 50341aac79fe297d81d88ea21e446916: Running coprocessor pre-open hook at 1733406297163Writing region info on filesystem at 1733406297163Initializing all the Stores at 1733406297164 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406297164Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406297165 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406297165Cleaning up temporary data from old regions at 1733406297175 (+10 ms)Running coprocessor post-open hooks at 1733406297178 (+3 ms)Region opened successfully at 1733406297179 (+1 ms) 2024-12-05T13:44:57,195 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 50341aac79fe297d81d88ea21e446916 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-05T13:44:57,197 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
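
[Editor's note] The WARN entries around this point come from the aborting-flush test: the test's CustomStoreFlusher (AbstractTestWALReplay.java:619 in the stack trace) deliberately throws java.io.IOException: Simulated exception by tests, and the store retries the flush, logging "retrying num=0", "num=1", "num=2" at roughly one-second intervals. Below is a minimal, hypothetical sketch of such a retry loop; the flushSnapshot stand-in, the retry limit of 3, and the one-second pause are assumptions for illustration, not the actual HStore code or its configured defaults.

import java.io.IOException;

// Hypothetical sketch of the retry behaviour behind
// "Failed flushing store file ..., retrying num=N" with an injected failure.
public class FlushRetrySketch {

    // Stand-in for the test's failure-injecting flusher: always throws,
    // mirroring CustomStoreFlusher.flushSnapshot in the log's stack trace.
    static void flushSnapshot() throws IOException {
        throw new IOException("Simulated exception by tests");
    }

    // Retries the flush a bounded number of times, pausing between attempts.
    // maxRetries and pauseMillis are assumed values, not HBase defaults.
    static boolean flushWithRetries(int maxRetries, long pauseMillis)
            throws InterruptedException {
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            try {
                flushSnapshot();
                return true;                       // flush succeeded
            } catch (IOException e) {
                System.out.printf("Failed flushing store file, retrying num=%d (%s)%n",
                        attempt, e.getMessage());
                Thread.sleep(pauseMillis);         // back off before the next attempt
            }
        }
        return false;                              // give up after maxRetries attempts
    }

    public static void main(String[] args) throws InterruptedException {
        boolean ok = flushWithRetries(3, 1000L);
        System.out.println(ok ? "flush succeeded" : "flush aborted after retries");
    }
}

Because the injected exception is thrown on every attempt, the sketch logs three retry warnings about a second apart and then gives up, which is the pattern the surrounding log shows before the test later lets the flush proceed.
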
2024-12-05T13:44:58,197 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:44:58,281 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-05T13:44:58,327 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T13:44:58,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-05T13:44:58,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:58,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-05T13:44:58,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:58,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-05T13:44:58,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:58,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-05T13:44:58,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-05T13:44:59,198 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T13:45:00,199 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:01,201 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:02,202 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:03,203 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
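Each retry above repeats the identical trace roughly one second apart, ten times in total (num=0 through num=9), before the flush is abandoned. That is the shape of a bounded retry loop around the failing flush; the sketch below is a generic rendering of that loop, not HBase's code, and the bound and pause are configuration-driven in HBase (e.g. hbase.hstore.flush.retries.number), so treat the constants here as assumptions.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

// Generic bounded-retry wrapper mirroring the "retrying num=N" pattern in the log:
// the flush attempt is retried with a pause in between, and the last failure is
// rethrown once the retry budget is exhausted.
final class RetryingFlush {
    static <T> T withRetries(Callable<T> flushAttempt, int maxAttempts, long pauseMs)
            throws IOException, InterruptedException {
        IOException lastFailure = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return flushAttempt.call();
            } catch (IOException e) {
                lastFailure = e;
                System.err.printf("Failed flushing store file, retrying num=%d%n", attempt);
                if (attempt < maxAttempts - 1) {
                    Thread.sleep(pauseMs); // ~1 s between the retries visible above
                }
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
        throw lastFailure != null ? lastFailure : new IOException("flush never attempted");
    }
}
```

With maxAttempts = 10 and pauseMs = 1000 this would reproduce the ten one-second-spaced WARN lines seen in this run.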
2024-12-05T13:45:04,205 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:05,206 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:06,207 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 50341aac79fe297d81d88ea21e446916/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:06,209 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 50341aac79fe297d81d88ea21e446916: 2024-12-05T13:45:06,209 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,225 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 50341aac79fe297d81d88ea21e446916: 2024-12-05T13:45:06,225 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 
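The two INFO lines just above show the test treating both failures as the expected outcome: the first flush fails with the simulated IOException, and a second attempt fails because the test's server context reports itself as aborted. Below is a self-contained sketch of that expected-exception pattern (JUnit 4, as used throughout this run); the Callable is a stand-in for the real flush call on the region, not the actual test body.

```java
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.concurrent.Callable;

// Expected-exception pattern: the flush is supposed to fail, so the test only
// logs when the IOException arrives and fails the test if it does not.
public class ExpectedFlushFailureSketch {
    public static void main(String[] args) throws Exception {
        Callable<Void> flush = () -> {
            throw new IOException("Simulated exception by tests");
        };
        try {
            flush.call();
            fail("Flush should have thrown the simulated IOException");
        } catch (IOException expected) {
            System.out.println("Expected simulated exception when flushing region");
        }
    }
}
```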
2024-12-05T13:45:06,225 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 50341aac79fe297d81d88ea21e446916, disabling compactions & flushes 2024-12-05T13:45:06,225 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,225 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,225 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. after waiting 0 ms 2024-12-05T13:45:06,225 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,226 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,226 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 2024-12-05T13:45:06,226 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 50341aac79fe297d81d88ea21e446916: Waiting for close lock at 1733406306225Running coprocessor pre-close hooks at 1733406306225Disabling compacts and flushes for region at 1733406306225Disabling writes for close at 1733406306225Writing region close event to WAL at 1733406306225Running coprocessor post-close hooks at 1733406306226 (+1 ms)Closed at 1733406306226 2024-12-05T13:45:06,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741908_1086 (size=2398) 2024-12-05T13:45:06,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741908_1086 (size=2398) 2024-12-05T13:45:06,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741908_1086 (size=2398) 2024-12-05T13:45:06,243 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, size=2.3 K (2398bytes) 2024-12-05T13:45:06,243 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 2024-12-05T13:45:06,243 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 after 0ms 2024-12-05T13:45:06,246 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:06,246 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 took 3ms 2024-12-05T13:45:06,248 DEBUG 
[Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 so closing down 2024-12-05T13:45:06,248 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:45:06,249 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733406297078.temp 2024-12-05T13:45:06,251 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp 2024-12-05T13:45:06,251 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:45:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741909_1087 (size=1672) 2024-12-05T13:45:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741909_1087 (size=1672) 2024-12-05T13:45:06,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741909_1087 (size=1672) 2024-12-05T13:45:06,257 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-05T13:45:06,259 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 2024-12-05T13:45:06,259 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 13 ms; skipped=3; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, size=2.3 K, length=2398, corrupted=false, cancelled=false 2024-12-05T13:45:06,259 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, journal: Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, size=2.3 K (2398bytes) at 1733406306243Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 so closing down at 1733406306248 (+5 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp at 1733406306251 (+3 ms)3 split writer threads finished at 1733406306251Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733406306257 (+6 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000004-wal.1733406297078.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 at 1733406306259 (+2 ms)Processed 23 edits across 1 Regions in 13 ms; skipped=3; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078, size=2.3 K, length=2398, corrupted=false, cancelled=false at 1733406306259 2024-12-05T13:45:06,261 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406297078 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406297078 2024-12-05T13:45:06,262 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 2024-12-05T13:45:06,262 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:06,264 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:06,280 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406306264, exclude list is [], retry=0 2024-12-05T13:45:06,282 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:06,283 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:06,283 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:06,285 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406306264 2024-12-05T13:45:06,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:45:06,286 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 50341aac79fe297d81d88ea21e446916, NAME => 'testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.', 
STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:06,286 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,286 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:06,287 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,287 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,289 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,290 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName a 2024-12-05T13:45:06,290 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,290 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,290 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,291 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName b 2024-12-05T13:45:06,291 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,292 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,292 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,293 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50341aac79fe297d81d88ea21e446916 columnFamilyName c 2024-12-05T13:45:06,293 DEBUG [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,293 INFO [StoreOpener-50341aac79fe297d81d88ea21e446916-1 {}] regionserver.HStore(327): Store=50341aac79fe297d81d88ea21e446916/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,293 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,294 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,295 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,296 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 2024-12-05T13:45:06,298 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, 
valueCompressionType=null 2024-12-05T13:45:06,300 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 2024-12-05T13:45:06,300 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 50341aac79fe297d81d88ea21e446916 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-05T13:45:06,315 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/a/b412bb9656bb4d679e92a0598c0bff6d is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733406306215/Put/seqid=0 2024-12-05T13:45:06,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741911_1089 (size=5523) 2024-12-05T13:45:06,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741911_1089 (size=5523) 2024-12-05T13:45:06,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741911_1089 (size=5523) 2024-12-05T13:45:06,322 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/a/b412bb9656bb4d679e92a0598c0bff6d 2024-12-05T13:45:06,341 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/b/73e7e20012b54ba3b5f67b4a835124af is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733406306210/Put/seqid=0 2024-12-05T13:45:06,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741912_1090 (size=5524) 2024-12-05T13:45:06,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741912_1090 (size=5524) 2024-12-05T13:45:06,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741912_1090 (size=5524) 2024-12-05T13:45:06,348 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/b/73e7e20012b54ba3b5f67b4a835124af 2024-12-05T13:45:06,366 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/c/2cb13f22acd646469957b7756c0dc9c8 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733406306211/Put/seqid=0 2024-12-05T13:45:06,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741913_1091 (size=5457) 2024-12-05T13:45:06,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741913_1091 (size=5457) 
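The replay numbers above ("Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26") and the per-family flushes around them illustrate the core recovered-edits bookkeeping: edits at or below the region's last flushed sequence id are skipped, the rest are re-applied to the memstore, and the region reopens at maxSequenceIdInLog + 1 (here, next sequenceid=27). A minimal sketch of that bookkeeping follows, using a simplified edit type rather than HBase's WAL classes.

```java
import java.util.List;

// Simplified WAL edit: just a sequence id and an opaque payload.
record RecoveredEdit(long seqId, byte[] payload) {}

final class RecoveredEditsReplaySketch {
    /**
     * Replays recovered edits into an in-memory store, skipping anything the
     * region has already persisted, and returns the highest sequence id seen
     * (the region reopens with nextSequenceId = returned value + 1).
     */
    static long replay(List<RecoveredEdit> edits, long maxFlushedSeqId) {
        long applied = 0, skipped = 0, maxSeqIdInLog = maxFlushedSeqId;
        for (RecoveredEdit edit : edits) {
            if (edit.seqId() <= maxFlushedSeqId) {
                skipped++;   // already covered by an existing store file
                continue;
            }
            applied++;       // a real region would insert into the memstore here
            maxSeqIdInLog = Math.max(maxSeqIdInLog, edit.seqId());
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
                applied, skipped, maxSeqIdInLog);
        return maxSeqIdInLog;
    }
}
```

After the replay, the rebuilt memstore is flushed to per-family store files (the .tmp/a, .tmp/b, .tmp/c writes in this part of the log) and the recovered.edits file is deleted, which the subsequent log records also show.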
2024-12-05T13:45:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741913_1091 (size=5457) 2024-12-05T13:45:06,373 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/c/2cb13f22acd646469957b7756c0dc9c8 2024-12-05T13:45:06,380 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/a/b412bb9656bb4d679e92a0598c0bff6d as hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/a/b412bb9656bb4d679e92a0598c0bff6d 2024-12-05T13:45:06,385 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/a/b412bb9656bb4d679e92a0598c0bff6d, entries=7, sequenceid=26, filesize=5.4 K 2024-12-05T13:45:06,386 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/b/73e7e20012b54ba3b5f67b4a835124af as hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/b/73e7e20012b54ba3b5f67b4a835124af 2024-12-05T13:45:06,391 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/b/73e7e20012b54ba3b5f67b4a835124af, entries=7, sequenceid=26, filesize=5.4 K 2024-12-05T13:45:06,392 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/.tmp/c/2cb13f22acd646469957b7756c0dc9c8 as hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/c/2cb13f22acd646469957b7756c0dc9c8 2024-12-05T13:45:06,397 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/c/2cb13f22acd646469957b7756c0dc9c8, entries=6, sequenceid=26, filesize=5.3 K 2024-12-05T13:45:06,397 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 50341aac79fe297d81d88ea21e446916 in 97ms, sequenceid=26, compaction requested=false; wal=null 2024-12-05T13:45:06,398 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/0000000000000000026 2024-12-05T13:45:06,400 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,400 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,400 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:06,402 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,404 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsAfterAbortingFlush/50341aac79fe297d81d88ea21e446916/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-05T13:45:06,405 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 50341aac79fe297d81d88ea21e446916; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63587850, jitterRate=-0.05246719717979431}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:06,405 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 50341aac79fe297d81d88ea21e446916 2024-12-05T13:45:06,406 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 50341aac79fe297d81d88ea21e446916: Running coprocessor pre-open hook at 1733406306287Writing region info on filesystem at 1733406306287Initializing all the Stores at 1733406306288 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306288Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306288Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306288Obtaining lock to block concurrent updates at 1733406306300 (+12 ms)Preparing flush snapshotting stores in 50341aac79fe297d81d88ea21e446916 at 1733406306300Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733406306300Flushing stores of testReplayEditsAfterAbortingFlush,,1733406296910.50341aac79fe297d81d88ea21e446916. 
at 1733406306300Flushing 50341aac79fe297d81d88ea21e446916/a: creating writer at 1733406306300Flushing 50341aac79fe297d81d88ea21e446916/a: appending metadata at 1733406306314 (+14 ms)Flushing 50341aac79fe297d81d88ea21e446916/a: closing flushed file at 1733406306314Flushing 50341aac79fe297d81d88ea21e446916/b: creating writer at 1733406306327 (+13 ms)Flushing 50341aac79fe297d81d88ea21e446916/b: appending metadata at 1733406306340 (+13 ms)Flushing 50341aac79fe297d81d88ea21e446916/b: closing flushed file at 1733406306340Flushing 50341aac79fe297d81d88ea21e446916/c: creating writer at 1733406306353 (+13 ms)Flushing 50341aac79fe297d81d88ea21e446916/c: appending metadata at 1733406306366 (+13 ms)Flushing 50341aac79fe297d81d88ea21e446916/c: closing flushed file at 1733406306366Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38c1fd6c: reopening flushed file at 1733406306379 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f4866be: reopening flushed file at 1733406306385 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43767a91: reopening flushed file at 1733406306391 (+6 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 50341aac79fe297d81d88ea21e446916 in 97ms, sequenceid=26, compaction requested=false; wal=null at 1733406306397 (+6 ms)Cleaning up temporary data from old regions at 1733406306400 (+3 ms)Running coprocessor post-open hooks at 1733406306405 (+5 ms)Region opened successfully at 1733406306406 (+1 ms) 2024-12-05T13:45:06,425 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsAfterAbortingFlush Thread=421 (was 420) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46140 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:49656 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:32908 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:49644 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:32920 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46114 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1182 (was 1124) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 327), ProcessCount=11 (was 11), AvailableMemoryMB=8019 (was 8322) 2024-12-05T13:45:06,425 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1182 is superior to 1024 2024-12-05T13:45:06,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testDatalossWhenInputError Thread=421, OpenFileDescriptor=1182, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=8017 2024-12-05T13:45:06,436 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1182 is superior to 1024 2024-12-05T13:45:06,449 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:06,451 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:06,452 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:45:06,454 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-08360157, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-08360157, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:06,466 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-08360157/hregion-08360157.1733406306454, exclude list is [], retry=0 2024-12-05T13:45:06,468 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:06,468 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:06,469 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:06,470 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-08360157/hregion-08360157.1733406306454 2024-12-05T13:45:06,470 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:45:06,471 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 0c41c5123e4c0ba19553886327477261, NAME => 'testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:45:06,480 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741915_1093 (size=61) 2024-12-05T13:45:06,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741915_1093 (size=61) 2024-12-05T13:45:06,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741915_1093 (size=61) 2024-12-05T13:45:06,482 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:06,483 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,484 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c41c5123e4c0ba19553886327477261 columnFamilyName a 2024-12-05T13:45:06,485 DEBUG [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,485 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(327): Store=0c41c5123e4c0ba19553886327477261/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,485 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,486 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,486 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,487 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,487 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,488 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,490 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:45:06,491 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0c41c5123e4c0ba19553886327477261; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66627910, jitterRate=-0.007166773080825806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:45:06,491 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0c41c5123e4c0ba19553886327477261: Writing region info on filesystem at 1733406306482Initializing all the Stores at 1733406306483 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306483Cleaning up temporary data from old regions at 1733406306487 (+4 ms)Region opened successfully at 1733406306491 (+4 ms) 2024-12-05T13:45:06,491 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0c41c5123e4c0ba19553886327477261, disabling compactions & flushes 2024-12-05T13:45:06,492 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,492 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,492 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. after waiting 0 ms 2024-12-05T13:45:06,492 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,492 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 
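The WALFactory and AbstractFSWAL lines above show the async WAL provider being instantiated with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal sketch of the standard configuration keys behind those values; the numbers simply mirror what the log reports, and the roll-multiplier key is listed on the assumption that rollsize is derived as blocksize times multiplier:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
      public static Configuration asyncWalConfig() {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects AsyncFSWALProvider, the provider named in the log above.
        conf.set("hbase.wal.provider", "asyncfs");
        // WAL block size (256 MB in the log); roll size = blocksize * multiplier (0.5 -> 128 MB).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on un-archived WAL files before flushes are forced (maxLogs=32 in the log).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }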
2024-12-05T13:45:06,492 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0c41c5123e4c0ba19553886327477261: Waiting for close lock at 1733406306491Disabling compacts and flushes for region at 1733406306491Disabling writes for close at 1733406306492 (+1 ms)Writing region close event to WAL at 1733406306492Closed at 1733406306492 2024-12-05T13:45:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741914_1092 (size=93) 2024-12-05T13:45:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741914_1092 (size=93) 2024-12-05T13:45:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741914_1092 (size=93) 2024-12-05T13:45:06,496 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:45:06,496 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-08360157:(num 1733406306454) 2024-12-05T13:45:06,496 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:06,498 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:06,510 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, exclude list is [], retry=0 2024-12-05T13:45:06,513 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:06,513 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:06,513 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:06,514 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 2024-12-05T13:45:06,515 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:45:06,515 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c41c5123e4c0ba19553886327477261, NAME => 'testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:06,515 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:06,515 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,515 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,517 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,518 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c41c5123e4c0ba19553886327477261 columnFamilyName a 2024-12-05T13:45:06,518 DEBUG [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,518 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(327): Store=0c41c5123e4c0ba19553886327477261/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,518 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,519 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,520 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,521 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,521 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,522 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,523 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0c41c5123e4c0ba19553886327477261; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64923119, jitterRate=-0.032570138573646545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:45:06,524 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
0c41c5123e4c0ba19553886327477261: Writing region info on filesystem at 1733406306515Initializing all the Stores at 1733406306516 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306516Cleaning up temporary data from old regions at 1733406306521 (+5 ms)Region opened successfully at 1733406306524 (+3 ms) 2024-12-05T13:45:06,534 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0c41c5123e4c0ba19553886327477261, disabling compactions & flushes 2024-12-05T13:45:06,534 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,534 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,534 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. after waiting 0 ms 2024-12-05T13:45:06,534 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,534 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 2024-12-05T13:45:06,534 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. 
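The edits that are deliberately "lost" here and replayed further below are ten puts to family 'a' of a single row; the later flush reports keys like testDatalossWhenInputError/a:x0. The test writes them directly against the HRegion, but an equivalent write through the public client API would look roughly like the sketch below; the connection setup, qualifiers and values are illustrative assumptions, not the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DatalossWritesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testDatalossWhenInputError"))) {
          // Ten cells in family 'a', qualifiers x0..x9, on one row; mirrors the
          // "Applied 10" edits later replayed from recovered.edits in this log.
          for (int i = 0; i < 10; i++) {
            Put put = new Put(Bytes.toBytes("testDatalossWhenInputError"));
            put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("x" + i), Bytes.toBytes("v" + i));
            table.put(put);
          }
        }
      }
    }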
2024-12-05T13:45:06,534 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0c41c5123e4c0ba19553886327477261: Waiting for close lock at 1733406306534Disabling compacts and flushes for region at 1733406306534Disabling writes for close at 1733406306534Writing region close event to WAL at 1733406306534Closed at 1733406306534 2024-12-05T13:45:06,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741916_1094 (size=838) 2024-12-05T13:45:06,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741916_1094 (size=838) 2024-12-05T13:45:06,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741916_1094 (size=838) 2024-12-05T13:45:06,551 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, size=838 (838bytes) 2024-12-05T13:45:06,551 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 2024-12-05T13:45:06,552 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 after 1ms 2024-12-05T13:45:06,554 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:06,555 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 took 4ms 2024-12-05T13:45:06,557 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 so closing down 2024-12-05T13:45:06,557 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:45:06,558 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733406306499.temp 2024-12-05T13:45:06,559 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp 2024-12-05T13:45:06,560 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:45:06,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741917_1095 (size=838) 2024-12-05T13:45:06,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741917_1095 (size=838) 2024-12-05T13:45:06,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741917_1095 (size=838) 2024-12-05T13:45:06,567 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-05T13:45:06,568 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp to hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 2024-12-05T13:45:06,568 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, size=838, length=838, corrupted=false, cancelled=false 2024-12-05T13:45:06,568 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, journal: Splitting hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, size=838 (838bytes) at 1733406306551Finishing writing output for hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 so closing down at 1733406306557 (+6 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp at 1733406306559 (+2 ms)3 split writer threads finished at 1733406306560 (+1 ms)Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733406306567 (+7 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000003-wal.1733406306499.temp to hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 at 1733406306568 (+1 ms)Processed 10 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499, size=838, length=838, corrupted=false, cancelled=false at 1733406306568 2024-12-05T13:45:06,570 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306499 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406306499 2024-12-05T13:45:06,570 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 2024-12-05T13:45:06,573 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 
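One detail worth noting in the split journal above: recovered-edits files are named with sequence ids zero-padded to 19 digits (the width of Long.MAX_VALUE), so the temp file carries the first sequence id written (3) and is renamed to the last one (12) once the writer closes. Purely as an illustration of the naming visible in those paths, not of HBase's internal helper:

    public class RecoveredEditsNameSketch {
      public static void main(String[] args) {
        // 19-digit zero padding reproduces the file names seen in the log:
        // "0000000000000000003-wal.1733406306499.temp" and, after rename, "0000000000000000012".
        System.out.println(String.format("%019d", 3L) + "-wal.1733406306499.temp");
        System.out.println(String.format("%019d", 12L));
      }
    }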
2024-12-05T13:45:06,822 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T13:45:06,892 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:06,894 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:06,904 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306894, exclude list is [], retry=0 2024-12-05T13:45:06,907 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:06,907 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:06,907 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:06,909 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306894 2024-12-05T13:45:06,909 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:45:06,909 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c41c5123e4c0ba19553886327477261, NAME => 'testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:06,909 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:06,909 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,909 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,912 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,913 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c41c5123e4c0ba19553886327477261 columnFamilyName a 2024-12-05T13:45:06,913 DEBUG [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,914 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(327): Store=0c41c5123e4c0ba19553886327477261/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,914 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,915 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,917 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,917 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 2024-12-05T13:45:06,919 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:06,920 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 2024-12-05T13:45:06,920 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0c41c5123e4c0ba19553886327477261 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-05T13:45:06,946 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/.tmp/a/523ec62825fe40cab36b77851ecfbbe6 is 79, key is testDatalossWhenInputError/a:x0/1733406306524/Put/seqid=0 2024-12-05T13:45:06,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741919_1097 (size=5808) 2024-12-05T13:45:06,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741919_1097 (size=5808) 2024-12-05T13:45:06,952 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741919_1097 (size=5808) 2024-12-05T13:45:06,952 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/.tmp/a/523ec62825fe40cab36b77851ecfbbe6 2024-12-05T13:45:06,963 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/.tmp/a/523ec62825fe40cab36b77851ecfbbe6 as hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/a/523ec62825fe40cab36b77851ecfbbe6 2024-12-05T13:45:06,973 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/a/523ec62825fe40cab36b77851ecfbbe6, entries=10, sequenceid=12, filesize=5.7 K 2024-12-05T13:45:06,974 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 0c41c5123e4c0ba19553886327477261 in 53ms, sequenceid=12, compaction requested=false; wal=null 2024-12-05T13:45:06,974 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/0000000000000000012 2024-12-05T13:45:06,976 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,976 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,978 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,980 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-05T13:45:06,981 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0c41c5123e4c0ba19553886327477261; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67325009, jitterRate=0.0032208114862442017}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:45:06,982 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0c41c5123e4c0ba19553886327477261: Writing region info on filesystem at 1733406306909Initializing all the Stores at 1733406306912 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306912Obtaining lock to block concurrent updates at 1733406306920 (+8 ms)Preparing flush snapshotting stores in 0c41c5123e4c0ba19553886327477261 at 1733406306921 (+1 ms)Finished memstore snapshotting testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261., syncing WAL and waiting on mvcc, 
flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733406306921Flushing stores of testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261. at 1733406306921Flushing 0c41c5123e4c0ba19553886327477261/a: creating writer at 1733406306921Flushing 0c41c5123e4c0ba19553886327477261/a: appending metadata at 1733406306945 (+24 ms)Flushing 0c41c5123e4c0ba19553886327477261/a: closing flushed file at 1733406306945Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57c7b219: reopening flushed file at 1733406306961 (+16 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 0c41c5123e4c0ba19553886327477261 in 53ms, sequenceid=12, compaction requested=false; wal=null at 1733406306974 (+13 ms)Cleaning up temporary data from old regions at 1733406306976 (+2 ms)Region opened successfully at 1733406306982 (+6 ms) 2024-12-05T13:45:06,985 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c41c5123e4c0ba19553886327477261, NAME => 'testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:06,985 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733406306449.0c41c5123e4c0ba19553886327477261.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:06,985 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,985 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,986 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,987 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c41c5123e4c0ba19553886327477261 columnFamilyName a 2024-12-05T13:45:06,987 DEBUG [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:06,993 DEBUG [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/a/523ec62825fe40cab36b77851ecfbbe6 2024-12-05T13:45:06,993 INFO [StoreOpener-0c41c5123e4c0ba19553886327477261-1 {}] regionserver.HStore(327): Store=0c41c5123e4c0ba19553886327477261/a, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:06,993 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,994 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,995 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,995 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,995 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,997 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0c41c5123e4c0ba19553886327477261 2024-12-05T13:45:06,999 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testDatalossWhenInputError/0c41c5123e4c0ba19553886327477261/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-05T13:45:07,000 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0c41c5123e4c0ba19553886327477261; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60928329, jitterRate=-0.09209714829921722}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T13:45:07,001 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0c41c5123e4c0ba19553886327477261: Writing region info on filesystem at 1733406306985Initializing all the Stores at 1733406306986 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406306986Cleaning up temporary data from old regions at 1733406306995 (+9 ms)Region opened successfully at 1733406307000 (+5 ms) 2024-12-05T13:45:07,017 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testDatalossWhenInputError Thread=431 (was 421) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46140 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:49656 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:32908 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:49738 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46216 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:33002 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1806447417-172.17.0.2-1733406274111:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1264 (was 1182) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=7928 (was 8017) 2024-12-05T13:45:07,018 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-12-05T13:45:07,030 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testCompactedBulkLoadedFiles Thread=431, OpenFileDescriptor=1264, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=7927 2024-12-05T13:45:07,030 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-12-05T13:45:07,046 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:07,047 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:07,048 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:45:07,050 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-28223582, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-28223582, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:07,061 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-28223582/hregion-28223582.1733406307050, exclude list is [], retry=0 2024-12-05T13:45:07,064 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:07,064 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:07,065 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:07,066 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-28223582/hregion-28223582.1733406307050 2024-12-05T13:45:07,066 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:45:07,067 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d2e4ca53683122d1e5c056bfac428eb6, NAME => 'testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:45:07,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741921_1099 (size=63) 2024-12-05T13:45:07,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741921_1099 (size=63) 2024-12-05T13:45:07,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741921_1099 (size=63) 2024-12-05T13:45:07,075 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:07,076 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,078 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName a 2024-12-05T13:45:07,078 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,078 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,078 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 
d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,080 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName b 2024-12-05T13:45:07,080 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,080 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,080 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,082 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName c 2024-12-05T13:45:07,082 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,082 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,082 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,083 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,084 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,085 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,085 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,086 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:07,087 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,091 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:45:07,091 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d2e4ca53683122d1e5c056bfac428eb6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63002129, jitterRate=-0.061195120215415955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:07,092 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d2e4ca53683122d1e5c056bfac428eb6: Writing region info on filesystem at 1733406307075Initializing all the Stores at 1733406307076 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307076Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307076Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307076Cleaning up temporary data from old regions at 1733406307085 (+9 ms)Region opened successfully at 1733406307092 (+7 ms) 2024-12-05T13:45:07,093 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d2e4ca53683122d1e5c056bfac428eb6, disabling compactions & flushes 2024-12-05T13:45:07,093 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:07,093 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 
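
The entries above show the test building table 'testCompactedBulkLoadedFiles' and its region d2e4ca53683122d1e5c056bfac428eb6 with three column families ('a', 'b', 'c'), each keeping a single version, and then opening the region before closing it again and reopening it against a fresh WAL. The test constructs the HRegion directly on HDFS; purely as an illustration (a minimal sketch, not the test's code, assuming a running cluster and the standard HBase 2.x client/admin API; the class name is made up for the sketch), an equivalent table definition from the client side could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testCompactedBulkLoadedFiles"));
          // Families 'a', 'b', 'c' with a single version each, as in the
          // column family descriptor printed in the region open journal above.
          for (String family : new String[] {"a", "b", "c"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
          }
          admin.createTable(tdb.build());
        }
      }
    }
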
2024-12-05T13:45:07,093 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. after waiting 0 ms 2024-12-05T13:45:07,093 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:07,094 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:07,094 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d2e4ca53683122d1e5c056bfac428eb6: Waiting for close lock at 1733406307093Disabling compacts and flushes for region at 1733406307093Disabling writes for close at 1733406307093Writing region close event to WAL at 1733406307093Closed at 1733406307093 2024-12-05T13:45:07,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741920_1098 (size=93) 2024-12-05T13:45:07,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741920_1098 (size=93) 2024-12-05T13:45:07,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741920_1098 (size=93) 2024-12-05T13:45:07,099 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:45:07,099 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-28223582:(num 1733406307050) 2024-12-05T13:45:07,099 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:07,101 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:07,114 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, exclude list is [], retry=0 2024-12-05T13:45:07,117 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:07,117 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:07,118 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:07,119 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 2024-12-05T13:45:07,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:45:07,120 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d2e4ca53683122d1e5c056bfac428eb6, NAME => 'testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:07,120 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:07,120 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,120 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,121 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,122 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName a 2024-12-05T13:45:07,122 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,123 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,123 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,124 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName b 2024-12-05T13:45:07,124 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,125 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,125 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,126 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName c 2024-12-05T13:45:07,126 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:07,127 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:07,127 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,128 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,129 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,129 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,129 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,130 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:07,131 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:07,132 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d2e4ca53683122d1e5c056bfac428eb6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63587570, jitterRate=-0.05247136950492859}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:07,132 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d2e4ca53683122d1e5c056bfac428eb6: Writing region info on filesystem at 1733406307120Initializing all the Stores at 1733406307121 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307121Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307121Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406307121Cleaning up temporary data from old regions at 1733406307129 (+8 ms)Region opened successfully at 1733406307132 (+3 ms) 2024-12-05T13:45:07,136 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733406307136/Put/seqid=0 2024-12-05T13:45:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741923_1101 (size=4875) 2024-12-05T13:45:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741923_1101 (size=4875) 2024-12-05T13:45:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741923_1101 (size=4875) 2024-12-05T13:45:07,150 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733406307150/Put/seqid=0 2024-12-05T13:45:07,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741924_1102 (size=4875) 2024-12-05T13:45:07,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741924_1102 (size=4875) 2024-12-05T13:45:07,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741924_1102 (size=4875) 
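
For reference, the '42.7 M' per-family flush lower bound logged by FlushLargeStoresPolicy above is simply the region memstore flush size divided by the table's three column families: assuming the default hbase.hregion.memstore.flush.size of 128 MB, 134217728 / 3 = 44739242 bytes ≈ 42.7 MB, which matches the flushSizeLowerBound=44739242 printed when the region is opened.
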
2024-12-05T13:45:07,159 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733406307158/Put/seqid=0 2024-12-05T13:45:07,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741925_1103 (size=4875) 2024-12-05T13:45:07,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741925_1103 (size=4875) 2024-12-05T13:45:07,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741925_1103 (size=4875) 2024-12-05T13:45:07,165 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,168 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-05T13:45:07,168 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T13:45:07,168 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,172 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-05T13:45:07,172 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T13:45:07,172 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,176 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-05T13:45:07,176 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T13:45:07,176 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d2e4ca53683122d1e5c056bfac428eb6 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-05T13:45:07,191 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp/a/459b6b1f0662447c8a1fc80764182dcf is 55, key is testCompactedBulkLoadedFiles/a:a/1733406307133/Put/seqid=0 2024-12-05T13:45:07,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741926_1104 (size=5107) 2024-12-05T13:45:07,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741926_1104 (size=5107) 2024-12-05T13:45:07,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741926_1104 (size=5107) 2024-12-05T13:45:07,198 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp/a/459b6b1f0662447c8a1fc80764182dcf 2024-12-05T13:45:07,204 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp/a/459b6b1f0662447c8a1fc80764182dcf as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf 2024-12-05T13:45:07,208 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf, entries=1, sequenceid=4, filesize=5.0 K 2024-12-05T13:45:07,209 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for d2e4ca53683122d1e5c056bfac428eb6 in 33ms, sequenceid=4, compaction requested=false 2024-12-05T13:45:07,209 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d2e4ca53683122d1e5c056bfac428eb6: 2024-12-05T13:45:07,211 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ 2024-12-05T13:45:07,212 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ 2024-12-05T13:45:07,212 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ 2024-12-05T13:45:07,213 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile0 into d2e4ca53683122d1e5c056bfac428eb6/a as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ - updating store file list. 2024-12-05T13:45:07,217 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd91583f4fe04af39c2606e46df2c735_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:07,217 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ into d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,217 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile0 into d2e4ca53683122d1e5c056bfac428eb6/a (new location: hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_) 2024-12-05T13:45:07,218 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile1 into d2e4ca53683122d1e5c056bfac428eb6/a as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ - updating store file list. 
2024-12-05T13:45:07,222 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for b2f0215a15e247f299c0e22869b7458f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:07,222 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ into d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,222 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile1 into d2e4ca53683122d1e5c056bfac428eb6/a (new location: hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_) 2024-12-05T13:45:07,223 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile2 into d2e4ca53683122d1e5c056bfac428eb6/a as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ - updating store file list. 2024-12-05T13:45:07,228 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 35940ec6121b4f628de32777370ee868_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:07,228 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ into d2e4ca53683122d1e5c056bfac428eb6/a 2024-12-05T13:45:07,228 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34105/hbase/testCompactedBulkLoadedFiles/hfile2 into d2e4ca53683122d1e5c056bfac428eb6/a (new location: hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_) 2024-12-05T13:45:07,235 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-05T13:45:07,235 DEBUG [Time-limited test {}] regionserver.HStore(1541): d2e4ca53683122d1e5c056bfac428eb6/a is initiating major compaction (all files) 2024-12-05T13:45:07,235 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of d2e4ca53683122d1e5c056bfac428eb6/a in testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 
2024-12-05T13:45:07,236 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_] into tmpdir=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp, totalSize=19.3 K 2024-12-05T13:45:07,236 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 459b6b1f0662447c8a1fc80764182dcf, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733406307133 2024-12-05T13:45:07,237 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting fd91583f4fe04af39c2606e46df2c735_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-05T13:45:07,237 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b2f0215a15e247f299c0e22869b7458f_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-05T13:45:07,237 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 35940ec6121b4f628de32777370ee868_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-05T13:45:07,249 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp/a/7b513ec6c960443785bd5e9ae7c37659 is 55, key is testCompactedBulkLoadedFiles/a:a/1733406307133/Put/seqid=0 2024-12-05T13:45:07,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741927_1105 (size=6154) 2024-12-05T13:45:07,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741927_1105 (size=6154) 2024-12-05T13:45:07,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741927_1105 (size=6154) 2024-12-05T13:45:07,262 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/.tmp/a/7b513ec6c960443785bd5e9ae7c37659 as hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/7b513ec6c960443785bd5e9ae7c37659 2024-12-05T13:45:07,268 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in d2e4ca53683122d1e5c056bfac428eb6/a of d2e4ca53683122d1e5c056bfac428eb6 into 7b513ec6c960443785bd5e9ae7c37659(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
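The compaction messages above show a major compaction selecting all four store files (the flushed file plus the three bulk-loaded ones, 19.3 K in total) and rewriting them into the single 6.0 K file 7b513ec6c960443785bd5e9ae7c37659. The test triggers this on the region itself; from a client the same request goes through the Admin API, which is asynchronous, so a caller that needs to wait typically polls the compaction state. A small sketch under those assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;

    public class MajorCompactExample {
        // Request a major compaction of every store of the table and wait for it.
        static void majorCompactAndWait(Admin admin, TableName table) throws Exception {
            admin.majorCompact(table);            // asynchronous request
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(100);                // poll until no compaction is running
            }
        }
    }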
2024-12-05T13:45:07,268 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for d2e4ca53683122d1e5c056bfac428eb6: 2024-12-05T13:45:07,268 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-05T13:45:07,268 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-05T13:45:07,298 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, size=0 (0bytes) 2024-12-05T13:45:07,298 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 might be still open, length is 0 2024-12-05T13:45:07,298 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 2024-12-05T13:45:07,298 WARN [IPC Server handler 3 on default port 34105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 has not been closed. Lease recovery is in progress. RecoveryId = 1106 for block blk_1073741922_1100 2024-12-05T13:45:07,299 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 after 1ms 2024-12-05T13:45:08,009 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-05T13:45:08,009 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-05T13:45:08,013 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-05T13:45:08,013 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-05T13:45:09,816 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:33048 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:43927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33048 dst: /127.0.0.1:43927 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43927 remote=/127.0.0.1:33048]. Total timeout mills is 60000, 57452 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:09,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:49774 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:38521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49774 dst: /127.0.0.1:38521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T13:45:09,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:46250 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:44137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46250 dst: /127.0.0.1:44137 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:09,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741922_1106 (size=1368) 2024-12-05T13:45:09,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741922_1106 (size=1368) 2024-12-05T13:45:11,300 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 after 4002ms 2024-12-05T13:45:11,305 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:11,305 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 took 4007ms 2024-12-05T13:45:11,307 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102; continuing. 
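Before the WAL can be split it has to be closed, and the writer that created wal.1733406307102 never closed it, so RecoverLeaseFSUtils asks the namenode to recover the lease: attempt=0 fails immediately ("Lease recovery is in progress"), and attempt=1 succeeds about four seconds later once block recovery has finalized blk_1073741922. The DataXceiver errors in between are the datanodes' side of that recovery tearing down the still-open write pipeline. A minimal sketch of the underlying HDFS calls, using the path from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseExample {
        // Trigger lease recovery and wait until the namenode reports the file closed.
        static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
            boolean closed = dfs.recoverLease(wal);   // true if the file is already closed
            while (!closed) {
                Thread.sleep(1000);                    // block recovery is asynchronous
                closed = dfs.isFileClosed(wal);        // re-check rather than re-trigger
            }
        }

        public static void main(String[] args) throws Exception {
            Path wal = new Path("hdfs://localhost:34105/hbase/WALs/"
                + "testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102");
            DistributedFileSystem dfs =
                (DistributedFileSystem) wal.getFileSystem(new Configuration());
            recoverLease(dfs, wal);
        }
    }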
2024-12-05T13:45:11,307 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 so closing down 2024-12-05T13:45:11,308 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:45:11,309 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733406307102.temp 2024-12-05T13:45:11,311 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp 2024-12-05T13:45:11,311 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:45:11,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741928_1107 (size=635) 2024-12-05T13:45:11,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741928_1107 (size=635) 2024-12-05T13:45:11,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741928_1107 (size=635) 2024-12-05T13:45:11,319 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-05T13:45:11,320 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp to hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 2024-12-05T13:45:11,321 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, size=0, length=0, corrupted=false, cancelled=false 2024-12-05T13:45:11,321 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, journal: Splitting hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, size=0 (0bytes) at 1733406307298Finishing writing output for hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 so closing down at 1733406311307 (+4009 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp at 1733406311311 (+4 ms)3 split writer threads finished at 1733406311311Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp (wrote 2 edits, skipped 0 
edits in 0 ms) at 1733406311319 (+8 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000003-wal.1733406307102.temp to hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 at 1733406311321 (+2 ms)Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102, size=0, length=0, corrupted=false, cancelled=false at 1733406311321 2024-12-05T13:45:11,322 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406307102 2024-12-05T13:45:11,323 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 2024-12-05T13:45:11,323 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:11,325 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:11,336 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406311325, exclude list is [], retry=0 2024-12-05T13:45:11,339 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:11,339 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:11,339 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:11,341 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406311325 2024-12-05T13:45:11,341 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:45:11,341 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d2e4ca53683122d1e5c056bfac428eb6, NAME => 'testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:11,341 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:11,341 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,342 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,346 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,347 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName a 2024-12-05T13:45:11,347 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,353 DEBUG [StoreFileOpener-d2e4ca53683122d1e5c056bfac428eb6-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 35940ec6121b4f628de32777370ee868_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:11,353 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ 2024-12-05T13:45:11,356 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf 2024-12-05T13:45:11,360 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/7b513ec6c960443785bd5e9ae7c37659 2024-12-05T13:45:11,363 DEBUG [StoreFileOpener-d2e4ca53683122d1e5c056bfac428eb6-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for b2f0215a15e247f299c0e22869b7458f_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:11,363 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ 2024-12-05T13:45:11,366 DEBUG [StoreFileOpener-d2e4ca53683122d1e5c056bfac428eb6-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd91583f4fe04af39c2606e46df2c735_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T13:45:11,367 
DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ 2024-12-05T13:45:11,367 WARN [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@6f54cff7 2024-12-05T13:45:11,367 WARN [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@6f54cff7 2024-12-05T13:45:11,367 WARN [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@6f54cff7 2024-12-05T13:45:11,367 WARN [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@6f54cff7 2024-12-05T13:45:11,367 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_] to archive 2024-12-05T13:45:11,368 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
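When the region is reopened, the store notices that the four pre-compaction files are fully covered by the compacted file 7b513ec6c960443785bd5e9ae7c37659, clears them from the store engine and hands them to HFileArchiver, which moves each one under the cluster's archive directory instead of deleting it. As the following lines show, the archive location simply mirrors the store file's path with the data prefix swapped for archive/data; a small illustration using a path from the log:

    // Illustration only: <root>/data/<ns>/<table>/<region>/<cf>/<file>
    //                  -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>
    public class ArchivePathExample {
        static String archivePathFor(String rootDir, String storeFilePath) {
            String relative = storeFilePath.substring((rootDir + "/data/").length());
            return rootDir + "/archive/data/" + relative;
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:34105/hbase";
            String src = root + "/data/default/testCompactedBulkLoadedFiles/"
                + "d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf";
            // Prints hdfs://localhost:34105/hbase/archive/data/default/testCompactedBulkLoadedFiles/...
            System.out.println(archivePathFor(root, src));
        }
    }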
2024-12-05T13:45:11,369 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ to hdfs://localhost:34105/hbase/archive/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/35940ec6121b4f628de32777370ee868_SeqId_4_ 2024-12-05T13:45:11,371 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf to hdfs://localhost:34105/hbase/archive/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/459b6b1f0662447c8a1fc80764182dcf 2024-12-05T13:45:11,372 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ to hdfs://localhost:34105/hbase/archive/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/b2f0215a15e247f299c0e22869b7458f_SeqId_4_ 2024-12-05T13:45:11,373 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ to hdfs://localhost:34105/hbase/archive/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/a/fd91583f4fe04af39c2606e46df2c735_SeqId_4_ 2024-12-05T13:45:11,373 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,373 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,374 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName b 2024-12-05T13:45:11,374 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,374 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] 
regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,374 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,375 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e4ca53683122d1e5c056bfac428eb6 columnFamilyName c 2024-12-05T13:45:11,375 DEBUG [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,375 INFO [StoreOpener-d2e4ca53683122d1e5c056bfac428eb6-1 {}] regionserver.HStore(327): Store=d2e4ca53683122d1e5c056bfac428eb6/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,375 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,376 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,377 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,378 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 2024-12-05T13:45:11,379 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:11,381 DEBUG [Time-limited test {}] regionserver.HRegion(5836): d2e4ca53683122d1e5c056bfac428eb6 : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "d2e4ca53683122d1e5c056bfac428eb6" family_name: "a" compaction_input: "459b6b1f0662447c8a1fc80764182dcf" compaction_input: "fd91583f4fe04af39c2606e46df2c735_SeqId_4_" compaction_input: "b2f0215a15e247f299c0e22869b7458f_SeqId_4_" compaction_input: 
"35940ec6121b4f628de32777370ee868_SeqId_4_" compaction_output: "7b513ec6c960443785bd5e9ae7c37659" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-05T13:45:11,381 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-05T13:45:11,381 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 2024-12-05T13:45:11,382 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/0000000000000000008 2024-12-05T13:45:11,383 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,383 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,384 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:11,385 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d2e4ca53683122d1e5c056bfac428eb6 2024-12-05T13:45:11,387 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testCompactedBulkLoadedFiles/d2e4ca53683122d1e5c056bfac428eb6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T13:45:11,388 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d2e4ca53683122d1e5c056bfac428eb6; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71240562, jitterRate=0.061567097902297974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:11,388 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d2e4ca53683122d1e5c056bfac428eb6: Writing region info on filesystem at 1733406311342Initializing all the Stores at 1733406311343 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311343Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311346 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1733406311346Cleaning up temporary data from old regions at 1733406311383 (+37 ms)Region opened successfully at 1733406311388 (+5 ms) 2024-12-05T13:45:11,390 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d2e4ca53683122d1e5c056bfac428eb6, disabling compactions & flushes 2024-12-05T13:45:11,390 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:11,390 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:11,390 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. after waiting 0 ms 2024-12-05T13:45:11,390 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:11,391 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733406307046.d2e4ca53683122d1e5c056bfac428eb6. 2024-12-05T13:45:11,391 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d2e4ca53683122d1e5c056bfac428eb6: Waiting for close lock at 1733406311390Disabling compacts and flushes for region at 1733406311390Disabling writes for close at 1733406311390Writing region close event to WAL at 1733406311391 (+1 ms)Closed at 1733406311391 2024-12-05T13:45:11,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741929_1108 (size=93) 2024-12-05T13:45:11,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741929_1108 (size=93) 2024-12-05T13:45:11,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741929_1108 (size=93) 2024-12-05T13:45:11,395 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:45:11,395 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733406311325) 2024-12-05T13:45:11,406 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testCompactedBulkLoadedFiles Thread=443 (was 431) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:34105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_886588457_22 at /127.0.0.1:57444 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:34105 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_886588457_22 at /127.0.0.1:34926 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1346 (was 1264) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=262 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=7909 (was 7927) 2024-12-05T13:45:11,406 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-12-05T13:45:11,417 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsWrittenViaHRegion Thread=443, OpenFileDescriptor=1346, MaxFileDescriptor=1048576, SystemLoadAverage=262, ProcessCount=11, AvailableMemoryMB=7908 2024-12-05T13:45:11,417 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-12-05T13:45:11,430 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:11,432 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:45:11,433 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:45:11,435 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-50337509, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/hregion-50337509, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:11,446 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-50337509/hregion-50337509.1733406311435, exclude list is [], retry=0 2024-12-05T13:45:11,449 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:11,449 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:11,449 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:11,451 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-50337509/hregion-50337509.1733406311435 2024-12-05T13:45:11,451 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:45:11,451 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 13b8dc64efc5c0d46b863bf83518bb2f, NAME => 'testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/hbase 2024-12-05T13:45:11,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741931_1110 (size=67) 2024-12-05T13:45:11,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741931_1110 (size=67) 2024-12-05T13:45:11,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741931_1110 (size=67) 2024-12-05T13:45:11,459 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:11,460 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,462 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName a 2024-12-05T13:45:11,462 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,462 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,462 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,463 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName b 2024-12-05T13:45:11,463 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,464 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,464 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,465 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName c 2024-12-05T13:45:11,465 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,465 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,465 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,466 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,466 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,467 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,467 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,468 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:11,468 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,470 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:45:11,471 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 13b8dc64efc5c0d46b863bf83518bb2f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69778872, jitterRate=0.03978621959686279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 13b8dc64efc5c0d46b863bf83518bb2f: Writing region info on filesystem at 1733406311459Initializing all the Stores at 1733406311460 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311460Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311460Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311460Cleaning up temporary data from old regions at 1733406311467 (+7 ms)Region opened successfully at 1733406311471 (+4 ms) 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 13b8dc64efc5c0d46b863bf83518bb2f, disabling compactions & flushes 2024-12-05T13:45:11,471 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 
after waiting 0 ms 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,471 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,471 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 13b8dc64efc5c0d46b863bf83518bb2f: Waiting for close lock at 1733406311471Disabling compacts and flushes for region at 1733406311471Disabling writes for close at 1733406311471Writing region close event to WAL at 1733406311471Closed at 1733406311471 2024-12-05T13:45:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741930_1109 (size=93) 2024-12-05T13:45:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741930_1109 (size=93) 2024-12-05T13:45:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741930_1109 (size=93) 2024-12-05T13:45:11,475 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:45:11,475 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-50337509:(num 1733406311435) 2024-12-05T13:45:11,475 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:11,477 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:11,488 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, exclude list is [], retry=0 2024-12-05T13:45:11,491 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:11,491 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:11,491 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:11,493 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 2024-12-05T13:45:11,493 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:45355:45355)] 2024-12-05T13:45:11,493 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 13b8dc64efc5c0d46b863bf83518bb2f, NAME => 'testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:11,493 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:11,493 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,493 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,495 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,495 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName a 2024-12-05T13:45:11,495 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,496 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,496 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,497 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName b 2024-12-05T13:45:11,497 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,497 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,497 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,498 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName c 2024-12-05T13:45:11,498 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,498 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,498 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,499 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,500 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,501 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,501 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,501 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-05T13:45:11,502 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,503 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 13b8dc64efc5c0d46b863bf83518bb2f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61020946, jitterRate=-0.09071704745292664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:11,503 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 13b8dc64efc5c0d46b863bf83518bb2f: Writing region info on filesystem at 1733406311493Initializing all the Stores at 1733406311494 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311494Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311494Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311494Cleaning up temporary data from old regions at 1733406311501 (+7 ms)Region opened successfully at 1733406311503 (+2 ms) 2024-12-05T13:45:11,512 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 13b8dc64efc5c0d46b863bf83518bb2f 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-05T13:45:11,526 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/607000d670db46fa93da4e2d69976228 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733406311503/Put/seqid=0 2024-12-05T13:45:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741933_1112 (size=5958) 2024-12-05T13:45:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741933_1112 (size=5958) 2024-12-05T13:45:11,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741933_1112 (size=5958) 2024-12-05T13:45:11,534 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/607000d670db46fa93da4e2d69976228 2024-12-05T13:45:11,541 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/607000d670db46fa93da4e2d69976228 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/607000d670db46fa93da4e2d69976228 2024-12-05T13:45:11,546 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/607000d670db46fa93da4e2d69976228, entries=10, sequenceid=13, filesize=5.8 K 2024-12-05T13:45:11,547 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 13b8dc64efc5c0d46b863bf83518bb2f in 35ms, sequenceid=13, compaction requested=false 2024-12-05T13:45:11,547 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 13b8dc64efc5c0d46b863bf83518bb2f: 2024-12-05T13:45:11,563 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 13b8dc64efc5c0d46b863bf83518bb2f, disabling compactions & flushes 2024-12-05T13:45:11,563 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,563 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,563 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. after waiting 0 ms 2024-12-05T13:45:11,563 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,564 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:11,564 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 
2024-12-05T13:45:11,564 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 13b8dc64efc5c0d46b863bf83518bb2f: Waiting for close lock at 1733406311563Disabling compacts and flushes for region at 1733406311563Disabling writes for close at 1733406311563Writing region close event to WAL at 1733406311564 (+1 ms)Closed at 1733406311564 2024-12-05T13:45:11,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741932_1111 (size=2805) 2024-12-05T13:45:11,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741932_1111 (size=2805) 2024-12-05T13:45:11,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741932_1111 (size=2805) 2024-12-05T13:45:11,586 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, size=2.7 K (2805bytes) 2024-12-05T13:45:11,586 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 2024-12-05T13:45:11,587 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 after 1ms 2024-12-05T13:45:11,589 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:11,589 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 took 3ms 2024-12-05T13:45:11,591 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 so closing down 2024-12-05T13:45:11,591 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:45:11,593 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733406311477.temp 2024-12-05T13:45:11,597 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp 2024-12-05T13:45:11,597 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:45:11,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741934_1113 (size=2312) 2024-12-05T13:45:11,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741934_1113 (size=2312) 2024-12-05T13:45:11,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741934_1113 
(size=2312) 2024-12-05T13:45:11,604 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-05T13:45:11,606 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 2024-12-05T13:45:11,607 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, size=2.7 K, length=2805, corrupted=false, cancelled=false 2024-12-05T13:45:11,607 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, journal: Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, size=2.7 K (2805bytes) at 1733406311586Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 so closing down at 1733406311591 (+5 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp at 1733406311597 (+6 ms)3 split writer threads finished at 1733406311597Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733406311604 (+7 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000003-wal.1733406311477.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 at 1733406311606 (+2 ms)Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477, size=2.7 K, length=2805, corrupted=false, cancelled=false at 1733406311607 (+1 ms) 2024-12-05T13:45:11,609 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311477 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406311477 2024-12-05T13:45:11,609 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 2024-12-05T13:45:11,610 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:11,612 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:11,624 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, exclude list is [], retry=0 2024-12-05T13:45:11,627 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:11,627 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:11,628 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:11,629 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 2024-12-05T13:45:11,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707)] 2024-12-05T13:45:11,630 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 13b8dc64efc5c0d46b863bf83518bb2f, NAME => 'testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:45:11,630 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:11,630 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,630 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,631 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,632 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName a 2024-12-05T13:45:11,632 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,639 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/607000d670db46fa93da4e2d69976228 2024-12-05T13:45:11,639 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,639 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,640 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName b 2024-12-05T13:45:11,640 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,640 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,641 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,641 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName c 2024-12-05T13:45:11,641 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:11,642 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:11,642 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,642 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,644 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,644 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 2024-12-05T13:45:11,647 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:11,648 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 2024-12-05T13:45:11,648 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 13b8dc64efc5c0d46b863bf83518bb2f 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-05T13:45:11,662 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/a9226f3f99d241368bff44346d22aebd is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733406311547/Put/seqid=0 2024-12-05T13:45:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741936_1115 (size=5958) 2024-12-05T13:45:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741936_1115 (size=5958) 2024-12-05T13:45:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741936_1115 (size=5958) 2024-12-05T13:45:11,670 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/a9226f3f99d241368bff44346d22aebd 2024-12-05T13:45:11,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/a992b7aa16f04555bf6c3214864d8d71 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733406311554/Put/seqid=0 2024-12-05T13:45:11,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741937_1116 (size=5958) 2024-12-05T13:45:11,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741937_1116 (size=5958) 2024-12-05T13:45:11,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741937_1116 (size=5958) 2024-12-05T13:45:11,693 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/a992b7aa16f04555bf6c3214864d8d71 2024-12-05T13:45:11,700 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/a9226f3f99d241368bff44346d22aebd as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/b/a9226f3f99d241368bff44346d22aebd 2024-12-05T13:45:11,706 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/b/a9226f3f99d241368bff44346d22aebd, entries=10, sequenceid=35, filesize=5.8 K 2024-12-05T13:45:11,707 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/a992b7aa16f04555bf6c3214864d8d71 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/c/a992b7aa16f04555bf6c3214864d8d71 2024-12-05T13:45:11,712 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/c/a992b7aa16f04555bf6c3214864d8d71, entries=10, sequenceid=35, filesize=5.8 K 2024-12-05T13:45:11,713 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 13b8dc64efc5c0d46b863bf83518bb2f in 65ms, sequenceid=35, compaction requested=false; wal=null 2024-12-05T13:45:11,713 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000035 2024-12-05T13:45:11,714 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,714 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,715 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:11,716 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:11,718 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-05T13:45:11,719 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 13b8dc64efc5c0d46b863bf83518bb2f; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62551584, jitterRate=-0.06790876388549805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:11,720 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 13b8dc64efc5c0d46b863bf83518bb2f: Writing region info on filesystem at 1733406311630Initializing all the Stores at 1733406311631 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311631Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311631Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406311631Obtaining lock to block concurrent updates at 1733406311648 (+17 ms)Preparing flush snapshotting stores in 13b8dc64efc5c0d46b863bf83518bb2f at 1733406311648Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733406311649 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 
at 1733406311649Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: creating writer at 1733406311649Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: appending metadata at 1733406311662 (+13 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: closing flushed file at 1733406311662Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: creating writer at 1733406311674 (+12 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: appending metadata at 1733406311686 (+12 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: closing flushed file at 1733406311686Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e7178c4: reopening flushed file at 1733406311699 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@788f5fdc: reopening flushed file at 1733406311706 (+7 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 13b8dc64efc5c0d46b863bf83518bb2f in 65ms, sequenceid=35, compaction requested=false; wal=null at 1733406311713 (+7 ms)Cleaning up temporary data from old regions at 1733406311714 (+1 ms)Region opened successfully at 1733406311720 (+6 ms) 2024-12-05T13:45:11,745 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-05T13:45:11,797 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, size=0 (0bytes) 2024-12-05T13:45:11,797 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 might be still open, length is 0 2024-12-05T13:45:11,797 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 2024-12-05T13:45:11,797 WARN [IPC Server handler 0 on default port 34105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 has not been closed. Lease recovery is in progress. RecoveryId = 1117 for block blk_1073741935_1114 2024-12-05T13:45:11,798 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 after 1ms 2024-12-05T13:45:12,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:37052 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:38521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37052 dst: /127.0.0.1:38521 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38521 remote=/127.0.0.1:37052]. Total timeout mills is 60000, 58933 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:12,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:34982 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:43927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34982 dst: /127.0.0.1:43927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T13:45:12,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-735726784_22 at /127.0.0.1:57486 [Receiving block BP-1806447417-172.17.0.2-1733406274111:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:44137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57486 dst: /127.0.0.1:44137 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741935_1117 (size=2304) 2024-12-05T13:45:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741935_1117 (size=2304) 2024-12-05T13:45:12,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741935_1117 (size=2304) 2024-12-05T13:45:15,798 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 after 4001ms 2024-12-05T13:45:15,802 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:15,802 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 took 4005ms 2024-12-05T13:45:15,804 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612; continuing. 
2024-12-05T13:45:15,804 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 so closing down 2024-12-05T13:45:15,804 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-05T13:45:15,806 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733406311612.temp 2024-12-05T13:45:15,807 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp 2024-12-05T13:45:15,807 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-05T13:45:15,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741938_1118 (size=2312) 2024-12-05T13:45:15,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741938_1118 (size=2312) 2024-12-05T13:45:15,817 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-05T13:45:15,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741938_1118 (size=2312) 2024-12-05T13:45:15,819 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 2024-12-05T13:45:15,819 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, size=0, length=0, corrupted=false, cancelled=false 2024-12-05T13:45:15,819 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, journal: Splitting hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, size=0 (0bytes) at 1733406311797Finishing writing output for hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 so closing down at 1733406315804 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp at 1733406315807 (+3 ms)3 split writer threads finished at 1733406315807Closed recovered edits writer 
path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733406315818 (+11 ms)Rename recovered edits hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000037-wal.1733406311612.temp to hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 at 1733406315819 (+1 ms)Processed 30 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612, size=0, length=0, corrupted=false, cancelled=false at 1733406315819 2024-12-05T13:45:15,820 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 to hdfs://localhost:34105/hbase/oldWALs/wal.1733406311612 2024-12-05T13:45:15,821 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 2024-12-05T13:45:15,821 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-05T13:45:15,822 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:34105/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430, archiveDir=hdfs://localhost:34105/hbase/oldWALs, maxLogs=32 2024-12-05T13:45:15,834 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406315823, exclude list is [], retry=0 2024-12-05T13:45:15,836 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44137,DS-bd01db56-461b-482d-8826-5939344b09cb,DISK] 2024-12-05T13:45:15,836 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43927,DS-02bc0856-58fd-483a-b6da-15cdad22a168,DISK] 2024-12-05T13:45:15,836 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38521,DS-d6728e3a-655f-4055-aaee-849d4009aeae,DISK] 2024-12-05T13:45:15,838 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406315823 2024-12-05T13:45:15,838 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45355:45355),(127.0.0.1/127.0.0.1:43707:43707),(127.0.0.1/127.0.0.1:36047:36047)] 2024-12-05T13:45:15,838 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:45:15,839 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,840 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName a 2024-12-05T13:45:15,840 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:15,845 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/607000d670db46fa93da4e2d69976228 2024-12-05T13:45:15,845 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:15,845 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,846 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName b 2024-12-05T13:45:15,846 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:15,852 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/b/a9226f3f99d241368bff44346d22aebd 2024-12-05T13:45:15,852 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:15,852 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,853 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b8dc64efc5c0d46b863bf83518bb2f columnFamilyName c 2024-12-05T13:45:15,853 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:45:15,860 DEBUG [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/c/a992b7aa16f04555bf6c3214864d8d71 2024-12-05T13:45:15,860 INFO [StoreOpener-13b8dc64efc5c0d46b863bf83518bb2f-1 {}] regionserver.HStore(327): Store=13b8dc64efc5c0d46b863bf83518bb2f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:45:15,860 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,861 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,862 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,862 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 2024-12-05T13:45:15,864 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=false, valueCompressionType=null 2024-12-05T13:45:15,867 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 2024-12-05T13:45:15,867 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 13b8dc64efc5c0d46b863bf83518bb2f 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-05T13:45:15,880 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/18d7b859ff13478b96ee5a867c1a0795 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733406311726/Put/seqid=0 2024-12-05T13:45:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741940_1120 (size=5958) 2024-12-05T13:45:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741940_1120 (size=5958) 2024-12-05T13:45:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741940_1120 (size=5958) 2024-12-05T13:45:15,886 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/18d7b859ff13478b96ee5a867c1a0795 2024-12-05T13:45:15,902 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/74414c12379745ea8cc090114c06cd56 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733406311732/Put/seqid=0 2024-12-05T13:45:15,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741941_1121 (size=5958) 2024-12-05T13:45:15,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741941_1121 (size=5958) 2024-12-05T13:45:15,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741941_1121 (size=5958) 2024-12-05T13:45:15,908 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/74414c12379745ea8cc090114c06cd56 2024-12-05T13:45:15,924 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/667520558bee473e9502395e53399a9a is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733406311741/Put/seqid=0 2024-12-05T13:45:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741942_1122 (size=5958) 2024-12-05T13:45:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741942_1122 
(size=5958) 2024-12-05T13:45:15,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741942_1122 (size=5958) 2024-12-05T13:45:15,931 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/667520558bee473e9502395e53399a9a 2024-12-05T13:45:15,935 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/a/18d7b859ff13478b96ee5a867c1a0795 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/18d7b859ff13478b96ee5a867c1a0795 2024-12-05T13:45:15,940 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/a/18d7b859ff13478b96ee5a867c1a0795, entries=10, sequenceid=66, filesize=5.8 K 2024-12-05T13:45:15,941 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/b/74414c12379745ea8cc090114c06cd56 as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/b/74414c12379745ea8cc090114c06cd56 2024-12-05T13:45:15,945 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/b/74414c12379745ea8cc090114c06cd56, entries=10, sequenceid=66, filesize=5.8 K 2024-12-05T13:45:15,946 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/.tmp/c/667520558bee473e9502395e53399a9a as hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/c/667520558bee473e9502395e53399a9a 2024-12-05T13:45:15,950 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/c/667520558bee473e9502395e53399a9a, entries=10, sequenceid=66, filesize=5.8 K 2024-12-05T13:45:15,950 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 13b8dc64efc5c0d46b863bf83518bb2f in 83ms, sequenceid=66, compaction requested=false; wal=null 2024-12-05T13:45:15,951 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/0000000000000000066 2024-12-05T13:45:15,952 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,952 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,953 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-05T13:45:15,954 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 13b8dc64efc5c0d46b863bf83518bb2f 2024-12-05T13:45:15,956 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/hbase/data/default/testReplayEditsWrittenViaHRegion/13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-05T13:45:15,957 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 13b8dc64efc5c0d46b863bf83518bb2f; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66224165, jitterRate=-0.013183042407035828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-05T13:45:15,957 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 13b8dc64efc5c0d46b863bf83518bb2f: Writing region info on filesystem at 1733406315838Initializing all the Stores at 1733406315839 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406315839Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406315839Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406315839Obtaining lock to block concurrent updates at 1733406315867 (+28 ms)Preparing flush snapshotting stores in 13b8dc64efc5c0d46b863bf83518bb2f at 1733406315867Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733406315867Flushing stores of testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 
at 1733406315867Flushing 13b8dc64efc5c0d46b863bf83518bb2f/a: creating writer at 1733406315867Flushing 13b8dc64efc5c0d46b863bf83518bb2f/a: appending metadata at 1733406315879 (+12 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/a: closing flushed file at 1733406315879Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: creating writer at 1733406315890 (+11 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: appending metadata at 1733406315901 (+11 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/b: closing flushed file at 1733406315902 (+1 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: creating writer at 1733406315912 (+10 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: appending metadata at 1733406315923 (+11 ms)Flushing 13b8dc64efc5c0d46b863bf83518bb2f/c: closing flushed file at 1733406315923Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29488a63: reopening flushed file at 1733406315935 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f3ed7a6: reopening flushed file at 1733406315940 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3927a31d: reopening flushed file at 1733406315945 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 13b8dc64efc5c0d46b863bf83518bb2f in 83ms, sequenceid=66, compaction requested=false; wal=null at 1733406315950 (+5 ms)Cleaning up temporary data from old regions at 1733406315952 (+2 ms)Region opened successfully at 1733406315957 (+5 ms) 2024-12-05T13:45:15,969 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 13b8dc64efc5c0d46b863bf83518bb2f, disabling compactions & flushes 2024-12-05T13:45:15,969 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:15,969 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:15,969 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. after waiting 0 ms 2024-12-05T13:45:15,969 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 2024-12-05T13:45:15,970 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733406311431.13b8dc64efc5c0d46b863bf83518bb2f. 
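The split and replay sequence above ends with a single recovered-edits file, 0000000000000000066, written under the region directory and then found, replayed, flushed, and deleted when the region reopens. A minimal sketch of how that directory could be inspected with the plain Hadoop FileSystem API follows; the HDFS URI and region path are copied from the log lines above, while the class name and standalone main method are illustrative assumptions rather than part of the test code.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: lists the contents of a region's recovered.edits
// directory (e.g. the 0000000000000000066 file produced by the split above).
public class ListRecoveredEdits {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // HDFS endpoint and region path taken from the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34105"), conf);
    Path editsDir = new Path("/hbase/data/default/testReplayEditsWrittenViaHRegion/"
        + "13b8dc64efc5c0d46b863bf83518bb2f/recovered.edits");
    if (fs.exists(editsDir)) {
      for (FileStatus status : fs.listStatus(editsDir)) {
        // The final file name (0000000000000000066) matches the maxSequenceIdInLog
        // reported in the split journal above.
        System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
      }
    }
    fs.close();
  }
}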
2024-12-05T13:45:15,970 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 13b8dc64efc5c0d46b863bf83518bb2f: Waiting for close lock at 1733406315969Disabling compacts and flushes for region at 1733406315969Disabling writes for close at 1733406315969Writing region close event to WAL at 1733406315970 (+1 ms)Closed at 1733406315970 2024-12-05T13:45:15,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741939_1119 (size=93) 2024-12-05T13:45:15,973 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406315823 not finished, retry = 0 2024-12-05T13:45:15,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741939_1119 (size=93) 2024-12-05T13:45:15,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741939_1119 (size=93) 2024-12-05T13:45:16,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-05T13:45:16,078 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733406315823) 2024-12-05T13:45:16,096 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayCompressed#testReplayEditsWrittenViaHRegion Thread=450 (was 443) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1747857540_22 at /127.0.0.1:35012 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1987708253) connection to localhost/127.0.0.1:34105 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1747857540_22 at /127.0.0.1:57544 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:34105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1747857540_22 at /127.0.0.1:37094 [Waiting for operation #17] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1412 (was 1346) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=241 (was 262), ProcessCount=11 (was 11), AvailableMemoryMB=7900 (was 7908) 2024-12-05T13:45:16,096 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1412 is superior to 1024 2024-12-05T13:45:16,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T13:45:16,096 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T13:45:16,097 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:45:16,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,097 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T13:45:16,097 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T13:45:16,097 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=438832029, stopped=false 2024-12-05T13:45:16,098 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=da6aa2204f50,39625,1733406277651 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T13:45:16,145 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:45:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:45:16,145 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T13:45:16,146 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:45:16,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,146 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:45:16,146 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:45:16,146 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:45:16,146 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'da6aa2204f50,43377,1733406278401' ***** 2024-12-05T13:45:16,146 INFO [Time-limited test {}] 
regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T13:45:16,146 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'da6aa2204f50,42407,1733406278528' ***** 2024-12-05T13:45:16,146 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T13:45:16,146 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T13:45:16,146 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T13:45:16,146 INFO [RS:0;da6aa2204f50:43377 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T13:45:16,146 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T13:45:16,146 INFO [RS:2;da6aa2204f50:42407 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T13:45:16,146 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T13:45:16,147 INFO [RS:0;da6aa2204f50:43377 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(3091): Received CLOSE for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:45:16,147 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(959): stopping server da6aa2204f50,43377,1733406278401 2024-12-05T13:45:16,147 INFO [RS:0;da6aa2204f50:43377 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:45:16,147 INFO [RS:0;da6aa2204f50:43377 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;da6aa2204f50:43377. 
2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(959): stopping server da6aa2204f50,42407,1733406278528 2024-12-05T13:45:16,147 DEBUG [RS:0;da6aa2204f50:43377 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:45:16,147 DEBUG [RS:0;da6aa2204f50:43377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;da6aa2204f50:42407. 
2024-12-05T13:45:16,147 DEBUG [RS:2;da6aa2204f50:42407 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:45:16,147 DEBUG [RS:2;da6aa2204f50:42407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,147 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(976): stopping server da6aa2204f50,43377,1733406278401; all regions closed. 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff, disabling compactions & flushes 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T13:45:16,147 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. after waiting 0 ms 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:45:16,147 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T13:45:16,147 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff=testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff.} 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T13:45:16,147 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T13:45:16,147 DEBUG [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T13:45:16,147 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T13:45:16,148 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T13:45:16,148 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.19 KB heapSize=2.79 KB 2024-12-05T13:45:16,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741835_1011 (size=2054) 2024-12-05T13:45:16,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741835_1011 (size=2054) 2024-12-05T13:45:16,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741835_1011 (size=2054) 2024-12-05T13:45:16,152 DEBUG [RS:0;da6aa2204f50:43377 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs 2024-12-05T13:45:16,152 INFO [RS:0;da6aa2204f50:43377 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL da6aa2204f50%2C43377%2C1733406278401:(num 1733406279958) 2024-12-05T13:45:16,152 DEBUG [RS:0;da6aa2204f50:43377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,152 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:45:16,152 INFO [RS:0;da6aa2204f50:43377 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] hbase.ChoreService(370): Chore service for: regionserver/da6aa2204f50:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.CompactSplit(469): Waiting for Split Thread to 
finish... 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T13:45:16,153 INFO [regionserver/da6aa2204f50:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:45:16,153 INFO [RS:0;da6aa2204f50:43377 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43377 2024-12-05T13:45:16,153 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/default/testReplayEditsAfterRegionMovedWithMultiCF/2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-05T13:45:16,153 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/40d22f9ce47f4f9fb110f63f714e5a17 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff./info:regioninfo/1733406296336/Put/seqid=0 2024-12-05T13:45:16,154 INFO [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 2024-12-05T13:45:16,154 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff: Waiting for close lock at 1733406316147Running coprocessor pre-close hooks at 1733406316147Disabling compacts and flushes for region at 1733406316147Disabling writes for close at 1733406316147Writing region close event to WAL at 1733406316148 (+1 ms)Running coprocessor post-close hooks at 1733406316154 (+6 ms)Closed at 1733406316154 2024-12-05T13:45:16,154 DEBUG [RS_CLOSE_REGION-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733406292345.2f1f9b4b0cc0e9bb5fbd99e20b8ea9ff. 
2024-12-05T13:45:16,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741943_1123 (size=6778) 2024-12-05T13:45:16,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741943_1123 (size=6778) 2024-12-05T13:45:16,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741943_1123 (size=6778) 2024-12-05T13:45:16,159 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=23 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/40d22f9ce47f4f9fb110f63f714e5a17 2024-12-05T13:45:16,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/da6aa2204f50,43377,1733406278401 2024-12-05T13:45:16,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:45:16,161 INFO [RS:0;da6aa2204f50:43377 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:45:16,164 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/.tmp/info/40d22f9ce47f4f9fb110f63f714e5a17 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/info/40d22f9ce47f4f9fb110f63f714e5a17 2024-12-05T13:45:16,169 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/info/40d22f9ce47f4f9fb110f63f714e5a17, entries=8, sequenceid=23, filesize=6.6 K 2024-12-05T13:45:16,170 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=23, compaction requested=false 2024-12-05T13:45:16,170 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [da6aa2204f50,43377,1733406278401] 2024-12-05T13:45:16,174 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/data/hbase/meta/1588230740/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=18 2024-12-05T13:45:16,174 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T13:45:16,175 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T13:45:16,175 DEBUG 
[RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733406316147Running coprocessor pre-close hooks at 1733406316147Disabling compacts and flushes for region at 1733406316147Disabling writes for close at 1733406316148 (+1 ms)Obtaining lock to block concurrent updates at 1733406316148Preparing flush snapshotting stores in 1588230740 at 1733406316148Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1218, getHeapSize=2792, getOffHeapSize=0, getCellsCount=8 at 1733406316148Flushing stores of hbase:meta,,1.1588230740 at 1733406316148Flushing 1588230740/info: creating writer at 1733406316149 (+1 ms)Flushing 1588230740/info: appending metadata at 1733406316153 (+4 ms)Flushing 1588230740/info: closing flushed file at 1733406316153Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@141df1e5: reopening flushed file at 1733406316164 (+11 ms)Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=23, compaction requested=false at 1733406316170 (+6 ms)Writing region close event to WAL at 1733406316171 (+1 ms)Running coprocessor post-close hooks at 1733406316174 (+3 ms)Closed at 1733406316175 (+1 ms) 2024-12-05T13:45:16,175 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T13:45:16,178 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/da6aa2204f50,43377,1733406278401 already deleted, retry=false 2024-12-05T13:45:16,178 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; da6aa2204f50,43377,1733406278401 expired; onlineServers=1 2024-12-05T13:45:16,239 INFO [regionserver/da6aa2204f50:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:45:16,239 INFO [regionserver/da6aa2204f50:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:45:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:45:16,270 INFO [RS:0;da6aa2204f50:43377 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:45:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43377-0x101a7065f890001, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:45:16,270 INFO [RS:0;da6aa2204f50:43377 {}] regionserver.HRegionServer(1031): Exiting; stopping=da6aa2204f50,43377,1733406278401; zookeeper connection closed. 2024-12-05T13:45:16,270 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52eea94d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52eea94d 2024-12-05T13:45:16,348 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(976): stopping server da6aa2204f50,42407,1733406278528; all regions closed. 
2024-12-05T13:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741893_1071 (size=1552) 2024-12-05T13:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741893_1071 (size=1552) 2024-12-05T13:45:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741893_1071 (size=1552) 2024-12-05T13:45:16,353 DEBUG [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs 2024-12-05T13:45:16,353 INFO [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL da6aa2204f50%2C42407%2C1733406278528.meta:.meta(num 1733406295499) 2024-12-05T13:45:16,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741834_1010 (size=841) 2024-12-05T13:45:16,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741834_1010 (size=841) 2024-12-05T13:45:16,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741834_1010 (size=841) 2024-12-05T13:45:16,358 DEBUG [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/oldWALs 2024-12-05T13:45:16,358 INFO [RS:2;da6aa2204f50:42407 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL da6aa2204f50%2C42407%2C1733406278528:(num 1733406279958) 2024-12-05T13:45:16,358 DEBUG [RS:2;da6aa2204f50:42407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:45:16,358 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:45:16,358 INFO [RS:2;da6aa2204f50:42407 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:45:16,358 INFO [RS:2;da6aa2204f50:42407 {}] hbase.ChoreService(370): Chore service for: regionserver/da6aa2204f50:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T13:45:16,358 INFO [RS:2;da6aa2204f50:42407 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:45:16,358 INFO [regionserver/da6aa2204f50:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T13:45:16,359 INFO [RS:2;da6aa2204f50:42407 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42407 2024-12-05T13:45:16,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/da6aa2204f50,42407,1733406278528 2024-12-05T13:45:16,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:45:16,366 INFO [RS:2;da6aa2204f50:42407 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:45:16,374 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [da6aa2204f50,42407,1733406278528] 2024-12-05T13:45:16,382 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/da6aa2204f50,42407,1733406278528 already deleted, retry=false 2024-12-05T13:45:16,383 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; da6aa2204f50,42407,1733406278528 expired; onlineServers=0 2024-12-05T13:45:16,383 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'da6aa2204f50,39625,1733406277651' ***** 2024-12-05T13:45:16,383 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T13:45:16,383 INFO [M:0;da6aa2204f50:39625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:45:16,383 INFO [M:0;da6aa2204f50:39625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:45:16,383 DEBUG [M:0;da6aa2204f50:39625 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T13:45:16,383 DEBUG [M:0;da6aa2204f50:39625 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T13:45:16,383 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T13:45:16,383 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406279581 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406279581,5,FailOnTimeoutGroup] 2024-12-05T13:45:16,383 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406279582 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406279582,5,FailOnTimeoutGroup] 2024-12-05T13:45:16,383 INFO [M:0;da6aa2204f50:39625 {}] hbase.ChoreService(370): Chore service for: master/da6aa2204f50:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T13:45:16,384 INFO [M:0;da6aa2204f50:39625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:45:16,384 DEBUG [M:0;da6aa2204f50:39625 {}] master.HMaster(1795): Stopping service threads 2024-12-05T13:45:16,384 INFO [M:0;da6aa2204f50:39625 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T13:45:16,384 INFO [M:0;da6aa2204f50:39625 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:45:16,384 INFO [M:0;da6aa2204f50:39625 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T13:45:16,384 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T13:45:16,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:45:16,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:45:16,391 DEBUG [M:0;da6aa2204f50:39625 {}] zookeeper.ZKUtil(347): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T13:45:16,391 WARN [M:0;da6aa2204f50:39625 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T13:45:16,392 INFO [M:0;da6aa2204f50:39625 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/.lastflushedseqids 2024-12-05T13:45:16,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741944_1124 (size=119) 2024-12-05T13:45:16,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741944_1124 (size=119) 2024-12-05T13:45:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741944_1124 (size=119) 2024-12-05T13:45:16,404 INFO [M:0;da6aa2204f50:39625 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T13:45:16,404 INFO [M:0;da6aa2204f50:39625 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T13:45:16,404 DEBUG 
[M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:45:16,404 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:45:16,404 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:45:16,404 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:45:16,404 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:45:16,404 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=83.38 KB heapSize=102.70 KB 2024-12-05T13:45:16,419 DEBUG [M:0;da6aa2204f50:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12e04b93230a46bc9d266902c37ee1f3 is 82, key is hbase:meta,,1/info:regioninfo/1733406295645/Put/seqid=0 2024-12-05T13:45:16,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741945_1125 (size=6063) 2024-12-05T13:45:16,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741945_1125 (size=6063) 2024-12-05T13:45:16,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741945_1125 (size=6063) 2024-12-05T13:45:16,429 INFO [M:0;da6aa2204f50:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1008 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12e04b93230a46bc9d266902c37ee1f3 2024-12-05T13:45:16,454 DEBUG [M:0;da6aa2204f50:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b91e42f28254ba2a40752b127346851 is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733406292787/Put/seqid=0 2024-12-05T13:45:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741946_1126 (size=7906) 2024-12-05T13:45:16,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741946_1126 (size=7906) 2024-12-05T13:45:16,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741946_1126 (size=7906) 2024-12-05T13:45:16,461 INFO [M:0;da6aa2204f50:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=82.16 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b91e42f28254ba2a40752b127346851 2024-12-05T13:45:16,465 INFO 
[M:0;da6aa2204f50:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b91e42f28254ba2a40752b127346851 2024-12-05T13:45:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:45:16,474 INFO [RS:2;da6aa2204f50:42407 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:45:16,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x101a7065f890003, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:45:16,475 INFO [RS:2;da6aa2204f50:42407 {}] regionserver.HRegionServer(1031): Exiting; stopping=da6aa2204f50,42407,1733406278528; zookeeper connection closed. 2024-12-05T13:45:16,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e6495c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e6495c6 2024-12-05T13:45:16,475 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T13:45:16,485 DEBUG [M:0;da6aa2204f50:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/801d2f24d57e4d1f965e3bcc9bf113fa is 69, key is da6aa2204f50,42407,1733406278528/rs:state/1733406279697/Put/seqid=0 2024-12-05T13:45:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741947_1127 (size=5440) 2024-12-05T13:45:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741947_1127 (size=5440) 2024-12-05T13:45:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741947_1127 (size=5440) 2024-12-05T13:45:16,492 INFO [M:0;da6aa2204f50:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/801d2f24d57e4d1f965e3bcc9bf113fa 2024-12-05T13:45:16,496 INFO [M:0;da6aa2204f50:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 801d2f24d57e4d1f965e3bcc9bf113fa 2024-12-05T13:45:16,497 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12e04b93230a46bc9d266902c37ee1f3 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12e04b93230a46bc9d266902c37ee1f3 2024-12-05T13:45:16,501 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12e04b93230a46bc9d266902c37ee1f3, entries=14, sequenceid=207, filesize=5.9 K 2024-12-05T13:45:16,503 
DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b91e42f28254ba2a40752b127346851 as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b91e42f28254ba2a40752b127346851
2024-12-05T13:45:16,508 INFO [M:0;da6aa2204f50:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b91e42f28254ba2a40752b127346851
2024-12-05T13:45:16,508 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b91e42f28254ba2a40752b127346851, entries=21, sequenceid=207, filesize=7.7 K
2024-12-05T13:45:16,509 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/801d2f24d57e4d1f965e3bcc9bf113fa as hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/801d2f24d57e4d1f965e3bcc9bf113fa
2024-12-05T13:45:16,513 INFO [M:0;da6aa2204f50:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 801d2f24d57e4d1f965e3bcc9bf113fa
2024-12-05T13:45:16,513 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/5075dc15-85b5-9d72-2464-9ca3f0c91f13/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/801d2f24d57e4d1f965e3bcc9bf113fa, entries=3, sequenceid=207, filesize=5.3 K
2024-12-05T13:45:16,514 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(3140): Finished flush of dataSize ~83.38 KB/85386, heapSize ~102.40 KB/104856, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=207, compaction requested=false
2024-12-05T13:45:16,516 INFO [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T13:45:16,516 DEBUG [M:0;da6aa2204f50:39625 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
  Waiting for close lock at 1733406316404
  Disabling compacts and flushes for region at 1733406316404
  Disabling writes for close at 1733406316404
  Obtaining lock to block concurrent updates at 1733406316404
  Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733406316404
  Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=85386, getHeapSize=105096, getOffHeapSize=0, getCellsCount=248 at 1733406316405 (+1 ms)
  Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733406316405
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733406316405
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733406316418 (+13 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733406316418
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733406316434 (+16 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733406316453 (+19 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733406316453
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733406316465 (+12 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733406316484 (+19 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733406316484
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56faf2ca: reopening flushed file at 1733406316496 (+12 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58a86599: reopening flushed file at 1733406316501 (+5 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9aaaef4: reopening flushed file at 1733406316508 (+7 ms)
  Finished flush of dataSize ~83.38 KB/85386, heapSize ~102.40 KB/104856, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=207, compaction requested=false at 1733406316514 (+6 ms)
  Writing region close event to WAL at 1733406316516 (+2 ms)
  Closed at 1733406316516
2024-12-05T13:45:16,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44137 is added to blk_1073741830_1006 (size=86735)
2024-12-05T13:45:16,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43927 is added to blk_1073741830_1006 (size=86735)
2024-12-05T13:45:16,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38521 is added to blk_1073741830_1006 (size=86735)
2024-12-05T13:45:16,519 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T13:45:16,519 INFO [M:0;da6aa2204f50:39625 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-05T13:45:16,519 INFO [M:0;da6aa2204f50:39625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39625
2024-12-05T13:45:16,519 INFO [M:0;da6aa2204f50:39625 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T13:45:16,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T13:45:16,628 INFO [M:0;da6aa2204f50:39625 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T13:45:16,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x101a7065f890000, quorum=127.0.0.1:53425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T13:45:16,635 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406306264 with renewLeaseKey: DEFAULT_16688
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406306264 (inode 16688) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733406296909/wal.1733406306264 (inode 16688) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-12-05T13:45:16,638 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296731 with renewLeaseKey: DEFAULT_16665 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296731 (inode 16665) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733406296431/wal.1733406296731 (inode 16665) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-12-05T13:45:16,638 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733406287964/wal.1733406288064 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:45:16,640 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306894 with renewLeaseKey: DEFAULT_16714 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306894 (inode 16714) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733406306448/wal.1733406306894 (inode 16714) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-05T13:45:16,640 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733406307045/wal.1733406307102 with renewLeaseKey: DEFAULT_16736
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T13:45:16,640 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733406311430/wal.1733406311612 with renewLeaseKey: DEFAULT_16777
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T13:45:16,641 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733406281606/wal.1733406281670 with renewLeaseKey: DEFAULT_16506
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T13:45:16,642 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal.1733406281478 with renewLeaseKey: DEFAULT_16485
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal.1733406281478 (inode 16485) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733406281304/wal.1733406281478 (inode 16485) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-05T13:45:16,644 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal.1733406281066 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal.1733406281066 (inode 16462) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733406280850/wal.1733406281066 (inode 16462) Holder DFSClient_NONMAPREDUCE_-735726784_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-05T13:45:16,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4290616c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:45:16,650 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e25d2b6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T13:45:16,651 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T13:45:16,651 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6853f5bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T13:45:16,651 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf0fdb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,STOPPED}
2024-12-05T13:45:16,653 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T13:45:16,653 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T13:45:16,653 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T13:45:16,653 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1806447417-172.17.0.2-1733406274111 (Datanode Uuid 9755e825-eb33-44d2-b506-195533761fcc) service to localhost/127.0.0.1:34105
2024-12-05T13:45:16,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data5/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data6/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,655 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T13:45:16,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28e1ba78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:45:16,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@702c0733{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T13:45:16,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T13:45:16,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3264f7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T13:45:16,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e971547{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,STOPPED}
2024-12-05T13:45:16,661 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T13:45:16,661 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T13:45:16,661 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1806447417-172.17.0.2-1733406274111 (Datanode Uuid aece2e20-6afc-40e8-a0a7-d473ec6b7265) service to localhost/127.0.0.1:34105
2024-12-05T13:45:16,661 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T13:45:16,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data3/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data4/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,662 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T13:45:16,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4dc44ec6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T13:45:16,665 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17f6e478{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T13:45:16,665 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T13:45:16,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d33512e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T13:45:16,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b895d95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,STOPPED}
2024-12-05T13:45:16,666 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T13:45:16,666 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T13:45:16,666 WARN [BP-1806447417-172.17.0.2-1733406274111 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1806447417-172.17.0.2-1733406274111 (Datanode Uuid 96498956-7bd8-4587-ac7d-05a24b9825d4) service to localhost/127.0.0.1:34105
2024-12-05T13:45:16,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T13:45:16,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data1/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/cluster_85ca2354-b7b9-c7f9-c352-3c3aa94503de/data/data2/current/BP-1806447417-172.17.0.2-1733406274111 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T13:45:16,667 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T13:45:16,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1843526c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T13:45:16,672 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@776c4d96{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T13:45:16,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T13:45:16,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70596a48{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T13:45:16,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6630be9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f557e20-91ff-b8eb-2490-226348ab106f/hadoop.log.dir/,STOPPED}
2024-12-05T13:45:16,682 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T13:45:16,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down