2024-12-07 12:48:30,503 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@210ab13f 2024-12-07 12:48:30,522 main DEBUG Took 0.017207 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 12:48:30,523 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 12:48:30,523 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 12:48:30,524 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 12:48:30,526 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,535 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 12:48:30,547 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,549 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,549 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,550 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,550 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,551 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,551 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,551 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,552 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,552 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,553 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,553 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,553 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 12:48:30,554 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,554 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,554 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,555 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,555 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,555 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,556 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,556 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,556 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,556 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:48:30,557 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,557 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 12:48:30,558 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:48:30,560 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 12:48:30,561 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 12:48:30,562 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 12:48:30,563 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 12:48:30,563 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 12:48:30,570 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 12:48:30,572 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 12:48:30,574 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 12:48:30,574 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 12:48:30,575 main DEBUG createAppenders(={Console}) 2024-12-07 12:48:30,576 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@210ab13f initialized 2024-12-07 12:48:30,576 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@210ab13f 2024-12-07 12:48:30,576 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@210ab13f OK. 2024-12-07 12:48:30,577 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 12:48:30,577 main DEBUG OutputStream closed 2024-12-07 12:48:30,577 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 12:48:30,577 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 12:48:30,578 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@61001b64 OK 2024-12-07 12:48:30,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 12:48:30,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 12:48:30,643 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 12:48:30,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 12:48:30,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 12:48:30,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 12:48:30,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 12:48:30,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 12:48:30,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 12:48:30,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 12:48:30,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 12:48:30,647 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 12:48:30,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 12:48:30,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 12:48:30,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 12:48:30,648 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 12:48:30,648 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 12:48:30,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 12:48:30,651 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 12:48:30,651 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@4ed5eb72) with optional ClassLoader: null 2024-12-07 12:48:30,651 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 12:48:30,652 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@4ed5eb72] started OK. 2024-12-07T12:48:30,859 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb 2024-12-07 12:48:30,862 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 12:48:30,863 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T12:48:30,871 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-12-07T12:48:30,897 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T12:48:30,936 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T12:48:30,936 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T12:48:30,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:48:30,958 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96, deleteOnExit=true 2024-12-07T12:48:30,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:48:30,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/test.cache.data in system properties and HBase conf 2024-12-07T12:48:30,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:48:30,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:48:30,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:48:30,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:48:30,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:48:31,026 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T12:48:31,095 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T12:48:31,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:48:31,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:48:31,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:48:31,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:48:31,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:48:31,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:48:31,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:48:31,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:48:31,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:48:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:48:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:48:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:48:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:48:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:48:31,783 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T12:48:31,856 INFO [Time-limited test {}] log.Log(170): Logging initialized @1940ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T12:48:31,940 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:48:32,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:48:32,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:48:32,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:48:32,026 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:48:32,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:48:32,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@751489f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:48:32,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e3a0561{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:48:32,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73b32aa2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/java.io.tmpdir/jetty-localhost-34351-hadoop-hdfs-3_4_1-tests_jar-_-any-7994184427158993087/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:48:32,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e2ddae{HTTP/1.1, (http/1.1)}{localhost:34351} 2024-12-07T12:48:32,219 INFO [Time-limited test {}] server.Server(415): Started @2303ms 2024-12-07T12:48:32,524 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:48:32,531 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:48:32,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:48:32,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:48:32,533 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:48:32,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50901468{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:48:32,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19895485{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:48:32,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17be2a4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/java.io.tmpdir/jetty-localhost-45869-hadoop-hdfs-3_4_1-tests_jar-_-any-502362430936586040/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:48:32,645 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@2ecf726e{HTTP/1.1, (http/1.1)}{localhost:45869} 2024-12-07T12:48:32,645 INFO [Time-limited test {}] server.Server(415): Started @2729ms 2024-12-07T12:48:32,696 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:48:32,812 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:48:32,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:48:32,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:48:32,822 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:48:32,822 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:48:32,825 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b152b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:48:32,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edd15fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:48:32,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70b25848{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/java.io.tmpdir/jetty-localhost-46259-hadoop-hdfs-3_4_1-tests_jar-_-any-9853932059894728862/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:48:32,923 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a8a49ff{HTTP/1.1, (http/1.1)}{localhost:46259} 2024-12-07T12:48:32,924 INFO [Time-limited test {}] server.Server(415): Started @3008ms 2024-12-07T12:48:32,926 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:48:32,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:48:32,967 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:48:32,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:48:32,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:48:32,970 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:48:32,970 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bd4c69c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:48:32,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@582ba343{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:48:33,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c5b5cda{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/java.io.tmpdir/jetty-localhost-45855-hadoop-hdfs-3_4_1-tests_jar-_-any-2537979833887129659/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:48:33,076 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32eb964{HTTP/1.1, (http/1.1)}{localhost:45855} 2024-12-07T12:48:33,076 INFO [Time-limited test {}] server.Server(415): Started @3160ms 2024-12-07T12:48:33,079 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:48:33,095 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data2/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,095 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data4/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,095 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data1/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,096 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data3/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,158 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:48:33,158 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:48:33,187 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data5/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,190 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data6/current/BP-1122527667-172.17.0.2-1733575711591/current, will proceed with Du for space computation calculation, 2024-12-07T12:48:33,219 WARN [Thread-118 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:48:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x392bf6ce3e046a6 with lease ID 0x2e98b3f23b9674f5: Processing first storage report for DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674 from datanode DatanodeRegistration(127.0.0.1:35169, datanodeUuid=003b6861-c907-45e5-84dc-58ef67fae0c7, infoPort=42693, infoSecurePort=0, ipcPort=43093, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x392bf6ce3e046a6 with lease ID 0x2e98b3f23b9674f5: from storage DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674 node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=003b6861-c907-45e5-84dc-58ef67fae0c7, infoPort=42693, infoSecurePort=0, ipcPort=43093, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa52661b3561b014 with lease ID 0x2e98b3f23b9674f6: Processing first storage report for DS-c130b5b7-9f3f-4346-b574-e4858082ed6f from datanode DatanodeRegistration(127.0.0.1:46077, datanodeUuid=2e57ee44-cfff-4171-9414-358776e55f15, infoPort=40403, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa52661b3561b014 with lease ID 0x2e98b3f23b9674f6: from storage DS-c130b5b7-9f3f-4346-b574-e4858082ed6f node DatanodeRegistration(127.0.0.1:46077, datanodeUuid=2e57ee44-cfff-4171-9414-358776e55f15, infoPort=40403, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c9a88992d61c380 with lease ID 0x2e98b3f23b9674f4: Processing first storage report for DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242 from datanode DatanodeRegistration(127.0.0.1:36341, datanodeUuid=c9941af7-8f70-4ab7-a32f-11dc9cb8dc32, infoPort=35333, infoSecurePort=0, ipcPort=38813, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c9a88992d61c380 with lease ID 0x2e98b3f23b9674f4: from storage DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242 node DatanodeRegistration(127.0.0.1:36341, datanodeUuid=c9941af7-8f70-4ab7-a32f-11dc9cb8dc32, infoPort=35333, infoSecurePort=0, ipcPort=38813, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x392bf6ce3e046a6 with lease ID 0x2e98b3f23b9674f5: Processing first storage report for DS-c7f4a12e-178f-467a-81f8-40868c225f75 from datanode DatanodeRegistration(127.0.0.1:35169, datanodeUuid=003b6861-c907-45e5-84dc-58ef67fae0c7, infoPort=42693, infoSecurePort=0, ipcPort=43093, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x392bf6ce3e046a6 with lease ID 0x2e98b3f23b9674f5: from storage DS-c7f4a12e-178f-467a-81f8-40868c225f75 node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=003b6861-c907-45e5-84dc-58ef67fae0c7, infoPort=42693, infoSecurePort=0, ipcPort=43093, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa52661b3561b014 with lease ID 0x2e98b3f23b9674f6: Processing first storage report for DS-887b5125-a0a2-4f7c-9330-c4d0610a08ab from datanode DatanodeRegistration(127.0.0.1:46077, datanodeUuid=2e57ee44-cfff-4171-9414-358776e55f15, infoPort=40403, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa52661b3561b014 with lease ID 0x2e98b3f23b9674f6: from storage DS-887b5125-a0a2-4f7c-9330-c4d0610a08ab node DatanodeRegistration(127.0.0.1:46077, datanodeUuid=2e57ee44-cfff-4171-9414-358776e55f15, infoPort=40403, infoSecurePort=0, ipcPort=41889, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c9a88992d61c380 with lease ID 0x2e98b3f23b9674f4: Processing first storage report for DS-c960a9d6-0001-4614-8961-24f367376f8f from datanode DatanodeRegistration(127.0.0.1:36341, datanodeUuid=c9941af7-8f70-4ab7-a32f-11dc9cb8dc32, infoPort=35333, infoSecurePort=0, ipcPort=38813, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591) 2024-12-07T12:48:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c9a88992d61c380 with lease ID 0x2e98b3f23b9674f4: from storage DS-c960a9d6-0001-4614-8961-24f367376f8f node DatanodeRegistration(127.0.0.1:36341, datanodeUuid=c9941af7-8f70-4ab7-a32f-11dc9cb8dc32, infoPort=35333, infoSecurePort=0, ipcPort=38813, storageInfo=lv=-57;cid=testClusterID;nsid=1248755822;c=1733575711591), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:48:33,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb 2024-12-07T12:48:33,501 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/zookeeper_0, clientPort=62259, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:48:33,519 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62259 2024-12-07T12:48:33,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:33,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:33,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:48:33,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:48:33,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:48:34,130 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 with version=8 2024-12-07T12:48:34,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/hbase-staging 2024-12-07T12:48:34,359 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2d46b487c067:0 server-side Connection retries=45 2024-12-07T12:48:34,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,371 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:48:34,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:48:34,487 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:48:34,540 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T12:48:34,548 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T12:48:34,551 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:48:34,576 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): 
-Dio.netty.processId: 77037 (auto-detected) 2024-12-07T12:48:34,577 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T12:48:34,596 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37233 2024-12-07T12:48:34,615 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37233 connecting to ZooKeeper ensemble=127.0.0.1:62259 2024-12-07T12:48:34,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372330x0, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:48:34,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37233-0x100b4b97f370000 connected 2024-12-07T12:48:34,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,686 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:48:34,690 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23, hbase.cluster.distributed=false 2024-12-07T12:48:34,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:48:34,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37233 2024-12-07T12:48:34,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37233 2024-12-07T12:48:34,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37233 2024-12-07T12:48:34,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37233 2024-12-07T12:48:34,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37233 2024-12-07T12:48:34,802 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2d46b487c067:0 server-side Connection retries=45 2024-12-07T12:48:34,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,803 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:48:34,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:48:34,806 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:48:34,807 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:48:34,808 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39787 2024-12-07T12:48:34,810 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39787 connecting to ZooKeeper ensemble=127.0.0.1:62259 2024-12-07T12:48:34,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397870x0, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:48:34,821 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:397870x0, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:48:34,824 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39787-0x100b4b97f370001 connected 2024-12-07T12:48:34,825 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:48:34,832 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:48:34,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:48:34,839 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:48:34,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39787 2024-12-07T12:48:34,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39787 2024-12-07T12:48:34,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39787 2024-12-07T12:48:34,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39787 2024-12-07T12:48:34,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39787 2024-12-07T12:48:34,856 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2d46b487c067:0 server-side Connection retries=45 2024-12-07T12:48:34,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,857 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:48:34,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:48:34,858 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:48:34,858 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:48:34,859 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39027 2024-12-07T12:48:34,860 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39027 connecting to ZooKeeper ensemble=127.0.0.1:62259 2024-12-07T12:48:34,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390270x0, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:48:34,870 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39027-0x100b4b97f370002 connected 2024-12-07T12:48:34,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:48:34,871 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:48:34,873 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:48:34,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:48:34,876 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:48:34,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39027 2024-12-07T12:48:34,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39027 2024-12-07T12:48:34,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39027 2024-12-07T12:48:34,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39027 2024-12-07T12:48:34,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39027 2024-12-07T12:48:34,899 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2d46b487c067:0 server-side Connection retries=45 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:48:34,900 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:48:34,901 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:48:34,901 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44445 2024-12-07T12:48:34,903 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44445 connecting to ZooKeeper ensemble=127.0.0.1:62259 2024-12-07T12:48:34,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:34,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444450x0, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, 
type=None, state=SyncConnected, path=null 2024-12-07T12:48:34,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:444450x0, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:48:34,911 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44445-0x100b4b97f370003 connected 2024-12-07T12:48:34,911 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:48:34,912 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:48:34,913 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:48:34,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:48:34,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44445 2024-12-07T12:48:34,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44445 2024-12-07T12:48:34,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44445 2024-12-07T12:48:34,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44445 2024-12-07T12:48:34,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44445 2024-12-07T12:48:34,934 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2d46b487c067:37233 2024-12-07T12:48:34,934 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2d46b487c067,37233,1733575714217 2024-12-07T12:48:34,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,942 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/2d46b487c067,37233,1733575714217 2024-12-07T12:48:34,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:34,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:48:34,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:48:34,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:48:34,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:34,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:34,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:34,964 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:48:34,965 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2d46b487c067,37233,1733575714217 from backup master directory 2024-12-07T12:48:34,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2d46b487c067,37233,1733575714217 2024-12-07T12:48:34,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:48:34,968 WARN [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:48:34,969 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2d46b487c067,37233,1733575714217 2024-12-07T12:48:34,971 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T12:48:34,972 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T12:48:35,021 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/hbase.id] with ID: b41e2b7d-ff03-4be2-9172-a81b81a933a0 2024-12-07T12:48:35,021 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/.tmp/hbase.id 2024-12-07T12:48:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:48:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:48:35,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:48:35,036 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/.tmp/hbase.id]:[hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/hbase.id] 2024-12-07T12:48:35,079 INFO [master/2d46b487c067:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:48:35,083 INFO [master/2d46b487c067:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:48:35,101 INFO [master/2d46b487c067:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 
2024-12-07T12:48:35,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:48:35,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:48:35,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:48:35,138 INFO [master/2d46b487c067:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:48:35,140 INFO [master/2d46b487c067:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:48:35,145 INFO [master/2d46b487c067:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 
is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:48:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:48:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:48:35,192 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store 2024-12-07T12:48:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:48:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:48:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:48:35,221 INFO [master/2d46b487c067:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-07T12:48:35,224 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:35,225 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:48:35,225 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:48:35,226 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:48:35,227 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:48:35,227 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:48:35,227 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:48:35,228 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733575715225Disabling compacts and flushes for region at 1733575715225Disabling writes for close at 1733575715227 (+2 ms)Writing region close event to WAL at 1733575715227Closed at 1733575715227 2024-12-07T12:48:35,231 WARN [master/2d46b487c067:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/.initializing 2024-12-07T12:48:35,231 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/WALs/2d46b487c067,37233,1733575714217 2024-12-07T12:48:35,238 INFO [master/2d46b487c067:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:35,253 INFO [master/2d46b487c067:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2d46b487c067%2C37233%2C1733575714217, suffix=, logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/WALs/2d46b487c067,37233,1733575714217, archiveDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/oldWALs, maxLogs=10 2024-12-07T12:48:35,288 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/WALs/2d46b487c067,37233,1733575714217/2d46b487c067%2C37233%2C1733575714217.1733575715258, exclude list is [], retry=0 2024-12-07T12:48:35,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:35,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:35,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:35,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:35,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T12:48:35,350 INFO [master/2d46b487c067:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/WALs/2d46b487c067,37233,1733575714217/2d46b487c067%2C37233%2C1733575714217.1733575715258 2024-12-07T12:48:35,351 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:35,351 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:35,352 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:35,354 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,355 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:48:35,411 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:35,414 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:48:35,417 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:35,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,421 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:48:35,421 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,422 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:35,422 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:48:35,425 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:35,426 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,430 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,432 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,437 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,438 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,442 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:48:35,445 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:48:35,450 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:35,452 INFO [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65335463, jitterRate=-0.026425734162330627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:48:35,458 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733575715366Initializing all the Stores at 1733575715368 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575715368Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575715369 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575715369Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575715369Cleaning up temporary data from old regions at 1733575715438 (+69 ms)Region opened successfully at 1733575715458 (+20 ms) 2024-12-07T12:48:35,459 INFO [master/2d46b487c067:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:48:35,488 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e4e6b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2d46b487c067/172.17.0.2:0 2024-12-07T12:48:35,514 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:48:35,523 INFO [master/2d46b487c067:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:48:35,523 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:48:35,525 INFO [master/2d46b487c067:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:48:35,526 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T12:48:35,531 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-07T12:48:35,531 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:48:35,553 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:48:35,561 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:48:35,563 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:48:35,565 INFO [master/2d46b487c067:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:48:35,566 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:48:35,567 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:48:35,569 INFO [master/2d46b487c067:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:48:35,572 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:48:35,573 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:48:35,575 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:48:35,576 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:48:35,593 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:48:35,594 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,600 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2d46b487c067,37233,1733575714217, sessionid=0x100b4b97f370000, setting cluster-up flag (Was=false) 2024-12-07T12:48:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,614 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:48:35,615 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2d46b487c067,37233,1733575714217 2024-12-07T12:48:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:35,624 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:48:35,625 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2d46b487c067,37233,1733575714217 2024-12-07T12:48:35,631 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:48:35,693 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:48:35,701 INFO [master/2d46b487c067:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:48:35,707 INFO [master/2d46b487c067:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T12:48:35,712 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2d46b487c067,37233,1733575714217 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:48:35,718 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2d46b487c067:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:48:35,718 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2d46b487c067:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:48:35,718 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2d46b487c067:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:48:35,719 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2d46b487c067:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:48:35,719 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2d46b487c067:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:48:35,719 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,719 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2d46b487c067:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:48:35,719 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,721 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733575745721 2024-12-07T12:48:35,723 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:48:35,724 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:48:35,726 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(746): ClusterId : b41e2b7d-ff03-4be2-9172-a81b81a933a0 2024-12-07T12:48:35,726 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(746): ClusterId : b41e2b7d-ff03-4be2-9172-a81b81a933a0 2024-12-07T12:48:35,726 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(746): ClusterId : b41e2b7d-ff03-4be2-9172-a81b81a933a0 2024-12-07T12:48:35,726 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:48:35,727 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:48:35,729 INFO [master/2d46b487c067:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:48:35,729 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:48:35,729 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:48:35,729 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:48:35,729 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:48:35,729 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:48:35,729 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:48:35,730 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,733 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:48:35,733 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:48:35,733 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:48:35,733 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:48:35,733 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:48:35,733 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:48:35,733 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:48:35,735 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:48:35,736 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:48:35,736 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:48:35,736 DEBUG [RS:0;2d46b487c067:39787 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aa2b0af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2d46b487c067/172.17.0.2:0 2024-12-07T12:48:35,736 DEBUG [RS:2;2d46b487c067:44445 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1179b194, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2d46b487c067/172.17.0.2:0 2024-12-07T12:48:35,737 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:48:35,737 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:48:35,738 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:48:35,739 DEBUG [RS:1;2d46b487c067:39027 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410f2562, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2d46b487c067/172.17.0.2:0 2024-12-07T12:48:35,742 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:48:35,743 INFO [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:48:35,746 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.large.0-1733575715744,5,FailOnTimeoutGroup] 2024-12-07T12:48:35,749 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.small.0-1733575715747,5,FailOnTimeoutGroup] 2024-12-07T12:48:35,749 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T12:48:35,749 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:48:35,751 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,751 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:48:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:48:35,757 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2d46b487c067:39787 2024-12-07T12:48:35,758 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2d46b487c067:44445 2024-12-07T12:48:35,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:48:35,760 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:48:35,761 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 2024-12-07T12:48:35,761 INFO 
[RS:2;2d46b487c067:44445 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:48:35,761 INFO [RS:0;2d46b487c067:39787 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:48:35,761 INFO [RS:2;2d46b487c067:44445 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:48:35,761 INFO [RS:0;2d46b487c067:39787 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:48:35,761 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T12:48:35,761 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T12:48:35,764 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(2659): reportForDuty to master=2d46b487c067,37233,1733575714217 with port=39787, startcode=1733575714772 2024-12-07T12:48:35,764 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(2659): reportForDuty to master=2d46b487c067,37233,1733575714217 with port=44445, startcode=1733575714899 2024-12-07T12:48:35,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:48:35,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:48:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:48:35,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:35,777 DEBUG [RS:0;2d46b487c067:39787 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:48:35,777 DEBUG [RS:2;2d46b487c067:44445 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:48:35,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:48:35,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:48:35,782 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
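
The storefiletracker.StoreFileTrackerFactory(122) entries above resolve the tracker named by the 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata shown in the hbase:meta descriptor earlier in the log. The sketch below declares the same per-table setting while building a descriptor; the table name, column family, and wrapper class are hypothetical, and the builder calls follow the usual HBase client descriptor API, so treat the exact method names as assumptions rather than something this log confirms.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerSketch {
      public static TableDescriptor descriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            // Same key/value pair the log shows in the hbase:meta METADATA block.
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
      }
    }
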
2024-12-07T12:48:35,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:35,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:48:35,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:48:35,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:35,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:48:35,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:48:35,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:35,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:48:35,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:48:35,795 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:35,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:35,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:48:35,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740 2024-12-07T12:48:35,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740 2024-12-07T12:48:35,801 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2d46b487c067:39027 2024-12-07T12:48:35,802 INFO [RS:1;2d46b487c067:39027 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:48:35,802 INFO [RS:1;2d46b487c067:39027 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:48:35,802 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T12:48:35,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:48:35,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:48:35,803 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(2659): reportForDuty to master=2d46b487c067,37233,1733575714217 with port=39027, startcode=1733575714856 2024-12-07T12:48:35,803 DEBUG [RS:1;2d46b487c067:39027 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:48:35,804 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
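
The FlushLargeStoresPolicy(65) entry above falls back to the memstore flush size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor: with the usual 128 MB flush size and the four families (info, ns, rep_barrier, table), that is 128 MB / 4 = 32 MB, which matches both the "32.0 M" here and the flushSizeLowerBound=33554432 reported a few entries later. A tiny self-contained sketch of that arithmetic, with the 128 MB default treated as an assumption:

    public class FlushBoundFallbackSketch {
      public static void main(String[] args) {
        // Mirrors the fallback described in the log: the memstore flush size
        // split evenly across the four hbase:meta column families.
        long memstoreFlushSize = 128L * 1024 * 1024;  // usual default (assumed)
        int families = 4;                             // info, ns, rep_barrier, table
        long perFamilyLowerBound = memstoreFlushSize / families;
        System.out.println(perFamilyLowerBound);      // 33554432 bytes = 32 MB
      }
    }
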
2024-12-07T12:48:35,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:48:35,813 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:35,815 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61168850, jitterRate=-0.08851310610771179}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:48:35,816 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:48:35,816 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55631, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:48:35,816 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45803, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:48:35,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733575715776Initializing all the Stores at 1733575715778 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575715778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575715778Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575715778Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575715778Cleaning up temporary data from old regions at 1733575715802 (+24 ms)Region opened successfully at 1733575715818 (+16 ms) 2024-12-07T12:48:35,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:48:35,818 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:48:35,818 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:48:35,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:48:35,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:48:35,820 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:48:35,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733575715818Disabling compacts and flushes for region at 1733575715818Disabling writes for close at 1733575715818Writing region close event to WAL at 1733575715819 (+1 ms)Closed at 1733575715819 2024-12-07T12:48:35,823 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:48:35,823 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:48:35,824 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,826 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(517): Registering regionserver=2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:48:35,837 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:48:35,837 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,838 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(517): Registering regionserver=2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,840 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:48:35,841 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(517): Registering regionserver=2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,841 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 2024-12-07T12:48:35,841 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43841 2024-12-07T12:48:35,841 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1440): 
Config from master: hbase.master.info.port=-1 2024-12-07T12:48:35,843 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 2024-12-07T12:48:35,843 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43841 2024-12-07T12:48:35,843 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:48:35,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:48:35,846 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 2024-12-07T12:48:35,846 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43841 2024-12-07T12:48:35,847 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:48:35,847 DEBUG [RS:2;2d46b487c067:44445 {}] zookeeper.ZKUtil(111): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,847 DEBUG [RS:1;2d46b487c067:39027 {}] zookeeper.ZKUtil(111): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,847 WARN [RS:2;2d46b487c067:44445 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:48:35,847 WARN [RS:1;2d46b487c067:39027 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:48:35,847 INFO [RS:1;2d46b487c067:39027 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:35,847 INFO [RS:2;2d46b487c067:44445 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:35,847 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,847 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,849 DEBUG [RS:0;2d46b487c067:39787 {}] zookeeper.ZKUtil(111): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,849 WARN [RS:0;2d46b487c067:39787 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
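
The wal.WALFactory(196) entries above show each region server instantiating AsyncFSWALProvider for its write-ahead log. A hedged configuration sketch of selecting that provider explicitly follows; the key hbase.wal.provider and the value "asyncfs" are not stated in this log and are assumed here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
      public static Configuration asyncWalConf() {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" is assumed to map to the provider class named in the log,
        // org.apache.hadoop.hbase.wal.AsyncFSWALProvider.
        conf.set("hbase.wal.provider", "asyncfs");
        return conf;
      }
    }
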
2024-12-07T12:48:35,849 INFO [RS:0;2d46b487c067:39787 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:35,849 DEBUG [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,850 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2d46b487c067,44445,1733575714899] 2024-12-07T12:48:35,850 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2d46b487c067,39787,1733575714772] 2024-12-07T12:48:35,850 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2d46b487c067,39027,1733575714856] 2024-12-07T12:48:35,871 INFO [RS:2;2d46b487c067:44445 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:48:35,871 INFO [RS:1;2d46b487c067:39027 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:48:35,871 INFO [RS:0;2d46b487c067:39787 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:48:35,884 INFO [RS:0;2d46b487c067:39787 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:48:35,884 INFO [RS:1;2d46b487c067:39027 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:48:35,884 INFO [RS:2;2d46b487c067:44445 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:48:35,890 INFO [RS:2;2d46b487c067:44445 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:48:35,890 INFO [RS:1;2d46b487c067:39027 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:48:35,890 INFO [RS:0;2d46b487c067:39787 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:48:35,891 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,891 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,891 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
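
The regionserver.MemStoreFlusher(131) entries above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit (880 × 0.95 = 836). The sketch below shows the two heap fractions that typically produce that pair of numbers; both key names and the 0.4 fraction are assumptions, not values taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreSizingSketch {
      public static Configuration memstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores (assumed key).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit; 0.95 * 880 MB = 836 MB,
        // matching the logged globalMemStoreLimitLowMark (assumed key).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }
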
2024-12-07T12:48:35,893 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:48:35,893 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:48:35,893 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:48:35,900 INFO [RS:1;2d46b487c067:39027 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:48:35,900 INFO [RS:0;2d46b487c067:39787 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:48:35,900 INFO [RS:2;2d46b487c067:44445 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:48:35,902 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,902 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,902 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,902 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,902 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,902 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG 
[RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,903 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,904 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:2;2d46b487c067:44445 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,904 DEBUG [RS:1;2d46b487c067:39027 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2d46b487c067:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,904 DEBUG [RS:0;2d46b487c067:39787 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2d46b487c067:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:48:35,908 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,908 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,908 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,908 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
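
The executor.ExecutorService(95) entries above list the per-region-server worker pools (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS, and so on) together with their core/max pool sizes. A hedged sketch of the kind of configuration that sizes two of those pools follows; both key names are assumptions and are not stated anywhere in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolSizingSketch {
      public static Configuration poolConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the RS_OPEN_REGION / RS_CLOSE_REGION pools logged above.
        conf.setInt("hbase.regionserver.executor.openregion.threads", 1);
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 1);
        return conf;
      }
    }
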
2024-12-07T12:48:35,908 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,908 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,39787,1733575714772-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,44445,1733575714899-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:48:35,909 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,39027,1733575714856-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:48:35,927 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:48:35,928 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:48:35,929 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,44445,1733575714899-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,929 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,39787,1733575714772-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,929 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,929 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:48:35,930 INFO [RS:2;2d46b487c067:44445 {}] regionserver.Replication(171): 2d46b487c067,44445,1733575714899 started 2024-12-07T12:48:35,930 INFO [RS:0;2d46b487c067:39787 {}] regionserver.Replication(171): 2d46b487c067,39787,1733575714772 started 2024-12-07T12:48:35,930 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:48:35,931 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,39027,1733575714856-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,931 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,931 INFO [RS:1;2d46b487c067:39027 {}] regionserver.Replication(171): 2d46b487c067,39027,1733575714856 started 2024-12-07T12:48:35,946 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,946 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1482): Serving as 2d46b487c067,44445,1733575714899, RpcServer on 2d46b487c067/172.17.0.2:44445, sessionid=0x100b4b97f370003 2024-12-07T12:48:35,947 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:48:35,947 DEBUG [RS:2;2d46b487c067:44445 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,947 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,44445,1733575714899' 2024-12-07T12:48:35,947 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:48:35,948 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:48:35,949 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:48:35,949 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:48:35,949 DEBUG [RS:2;2d46b487c067:44445 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2d46b487c067,44445,1733575714899 2024-12-07T12:48:35,949 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,44445,1733575714899' 2024-12-07T12:48:35,949 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:48:35,950 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:48:35,951 DEBUG [RS:2;2d46b487c067:44445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:48:35,951 INFO [RS:2;2d46b487c067:44445 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:48:35,951 INFO [RS:2;2d46b487c067:44445 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
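
The quotas.RegionServerRpcQuotaManager(64) and RegionServerSpaceQuotaManager(80) entries above show quota support disabled, which is the default behaviour. A hedged sketch of the switch that would enable it; the key hbase.quota.enabled is assumed here rather than taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static Configuration quotaEnabledConf() {
        Configuration conf = HBaseConfiguration.create();
        // Left at its default (false), the servers log "Quota support disabled"
        // exactly as seen above (key name assumed).
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }
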
2024-12-07T12:48:35,951 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,951 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1482): Serving as 2d46b487c067,39027,1733575714856, RpcServer on 2d46b487c067/172.17.0.2:39027, sessionid=0x100b4b97f370002 2024-12-07T12:48:35,951 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:35,952 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1482): Serving as 2d46b487c067,39787,1733575714772, RpcServer on 2d46b487c067/172.17.0.2:39787, sessionid=0x100b4b97f370001 2024-12-07T12:48:35,952 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:48:35,952 DEBUG [RS:1;2d46b487c067:39027 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,952 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,39027,1733575714856' 2024-12-07T12:48:35,952 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:48:35,952 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:48:35,952 DEBUG [RS:0;2d46b487c067:39787 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,952 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,39787,1733575714772' 2024-12-07T12:48:35,952 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2d46b487c067,39027,1733575714856 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,39027,1733575714856' 2024-12-07T12:48:35,953 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2d46b487c067,39787,1733575714772 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2d46b487c067,39787,1733575714772' 2024-12-07T12:48:35,953 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:48:35,954 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:48:35,954 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:48:35,954 DEBUG [RS:1;2d46b487c067:39027 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:48:35,954 INFO [RS:1;2d46b487c067:39027 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:48:35,955 DEBUG [RS:0;2d46b487c067:39787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:48:35,955 INFO [RS:1;2d46b487c067:39027 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:48:35,955 INFO [RS:0;2d46b487c067:39787 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:48:35,955 INFO [RS:0;2d46b487c067:39787 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:48:35,992 WARN [2d46b487c067:37233 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:48:36,057 INFO [RS:0;2d46b487c067:39787 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:36,057 INFO [RS:1;2d46b487c067:39027 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:36,057 INFO [RS:2;2d46b487c067:44445 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:36,060 INFO [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2d46b487c067%2C44445%2C1733575714899, suffix=, logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899, archiveDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs, maxLogs=32 2024-12-07T12:48:36,060 INFO [RS:1;2d46b487c067:39027 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2d46b487c067%2C39027%2C1733575714856, suffix=, logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856, archiveDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs, maxLogs=32 2024-12-07T12:48:36,060 INFO [RS:0;2d46b487c067:39787 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2d46b487c067%2C39787%2C1733575714772, suffix=, logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39787,1733575714772, archiveDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs, maxLogs=32 2024-12-07T12:48:36,085 DEBUG [RS:0;2d46b487c067:39787 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39787,1733575714772/2d46b487c067%2C39787%2C1733575714772.1733575716065, exclude list is [], retry=0 2024-12-07T12:48:36,085 DEBUG [RS:1;2d46b487c067:39027 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856/2d46b487c067%2C39027%2C1733575714856.1733575716065, exclude list is [], retry=0 2024-12-07T12:48:36,085 DEBUG [RS:2;2d46b487c067:44445 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899/2d46b487c067%2C44445%2C1733575714899.1733575716065, exclude list is [], retry=0 2024-12-07T12:48:36,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:36,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:36,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:36,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:36,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:36,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:36,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:36,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:36,093 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:36,108 INFO [RS:0;2d46b487c067:39787 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39787,1733575714772/2d46b487c067%2C39787%2C1733575714772.1733575716065 2024-12-07T12:48:36,115 INFO [RS:1;2d46b487c067:39027 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856/2d46b487c067%2C39027%2C1733575714856.1733575716065 2024-12-07T12:48:36,116 INFO [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899/2d46b487c067%2C44445%2C1733575714899.1733575716065 2024-12-07T12:48:36,116 DEBUG [RS:1;2d46b487c067:39027 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:36,116 DEBUG [RS:0;2d46b487c067:39787 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:36,117 DEBUG [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:36,244 DEBUG [2d46b487c067:37233 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T12:48:36,252 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(204): Hosts are {2d46b487c067=0} racks are {/default-rack=0} 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T12:48:36,259 INFO [2d46b487c067:37233 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T12:48:36,259 INFO [2d46b487c067:37233 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T12:48:36,259 INFO [2d46b487c067:37233 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T12:48:36,259 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T12:48:36,266 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2d46b487c067,44445,1733575714899 2024-12-07T12:48:36,273 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2d46b487c067,44445,1733575714899, state=OPENING 
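
The zookeeper.MetaTableLocator(171) entry above publishes the hbase:meta location to ZooKeeper so that clients can resolve it. Below is a hedged client-side sketch of that lookup; the quorum host and port mirror the 127.0.0.1:62259 value from the ZooKeeper entries in this log, the wrapper class is hypothetical, and the calls follow the usual HBase client API, so treat the exact method names as assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // from the log
        conf.set("hbase.zookeeper.property.clientPort", "62259"); // from the log
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // For this run the location being assigned above is
          // 2d46b487c067,44445,1733575714899.
          System.out.println(meta.getServerName());
        }
      }
    }
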
2024-12-07T12:48:36,277 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:48:36,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:36,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:36,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:36,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:48:36,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,282 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:48:36,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2d46b487c067,44445,1733575714899}] 2024-12-07T12:48:36,460 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:48:36,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44435, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:48:36,473 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:48:36,474 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:36,474 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T12:48:36,478 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2d46b487c067%2C44445%2C1733575714899.meta, suffix=.meta, 
logDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899, archiveDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs, maxLogs=32 2024-12-07T12:48:36,493 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899/2d46b487c067%2C44445%2C1733575714899.meta.1733575716480.meta, exclude list is [], retry=0 2024-12-07T12:48:36,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:36,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:36,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:36,500 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,44445,1733575714899/2d46b487c067%2C44445%2C1733575714899.meta.1733575716480.meta 2024-12-07T12:48:36,501 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:36,501 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:36,503 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:48:36,505 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:48:36,510 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
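The entries above show org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint being loaded into the hbase:meta region (priority 536870911) as it opens. For reference, here is a minimal sketch of how a coprocessor of that kind is typically attached to an ordinary table descriptor through the public client API; the table name "demo" and family "f" are placeholders rather than anything from this run, and meta's own descriptor is built internally, not this way.

```java
// Illustrative sketch: attaching a region coprocessor via TableDescriptorBuilder.
// "demo" and "f" are placeholder names; the endpoint class is the one seen in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static TableDescriptor build() throws Exception {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```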
2024-12-07T12:48:36,514 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:48:36,514 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:36,514 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:48:36,515 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:48:36,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:48:36,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:48:36,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:36,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:36,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:48:36,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:48:36,523 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:36,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:36,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:48:36,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:48:36,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:36,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:48:36,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:48:36,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:48:36,528 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:36,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
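The StoreOpener entries above all print the same CompactionConfiguration for the four meta families (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). Those values come from standard hbase-site.xml settings; the sketch below shows the usual property names, quoted from memory rather than from this run, so they should be checked against the version in use.

```java
// Assumed property names behind the CompactionConfiguration values logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration tune() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    return conf;
  }
}
```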
2024-12-07T12:48:36,529 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:48:36,531 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740 2024-12-07T12:48:36,534 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740 2024-12-07T12:48:36,537 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:48:36,537 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:48:36,538 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:48:36,541 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:48:36,543 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61045443, jitterRate=-0.09035201370716095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:48:36,543 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:48:36,545 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733575716515Writing region info on filesystem at 1733575716515Initializing all the Stores at 1733575716518 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575716518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575716518Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575716518Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733575716518Cleaning up temporary data from old regions at 1733575716537 (+19 ms)Running coprocessor post-open hooks at 1733575716543 (+6 ms)Region opened successfully at 1733575716545 (+2 ms) 2024-12-07T12:48:36,552 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733575716453 2024-12-07T12:48:36,562 DEBUG [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:48:36,562 INFO [RS_OPEN_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:48:36,564 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2d46b487c067,44445,1733575714899 2024-12-07T12:48:36,566 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2d46b487c067,44445,1733575714899, state=OPEN 2024-12-07T12:48:36,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:48:36,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:48:36,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:48:36,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:48:36,568 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,568 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,568 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,568 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:48:36,569 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=2d46b487c067,44445,1733575714899 2024-12-07T12:48:36,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:48:36,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2d46b487c067,44445,1733575714899 in 285 msec 2024-12-07T12:48:36,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:48:36,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 748 msec 2024-12-07T12:48:36,582 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:48:36,582 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:48:36,601 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:48:36,602 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2d46b487c067,44445,1733575714899, seqNum=-1] 2024-12-07T12:48:36,621 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:48:36,623 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42777, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:48:36,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 981 msec 2024-12-07T12:48:36,640 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733575716640, completionTime=-1 2024-12-07T12:48:36,642 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T12:48:36,642 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T12:48:36,670 INFO [master/2d46b487c067:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T12:48:36,670 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733575776670 2024-12-07T12:48:36,670 INFO [master/2d46b487c067:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733575836670 2024-12-07T12:48:36,670 INFO [master/2d46b487c067:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-07T12:48:36,671 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T12:48:36,696 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,697 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,697 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,699 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2d46b487c067:37233, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,699 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,703 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,708 DEBUG [master/2d46b487c067:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:48:36,739 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.770sec 2024-12-07T12:48:36,741 INFO [master/2d46b487c067:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:48:36,742 INFO [master/2d46b487c067:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:48:36,744 INFO [master/2d46b487c067:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:48:36,744 INFO [master/2d46b487c067:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T12:48:36,744 INFO [master/2d46b487c067:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:48:36,745 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:48:36,746 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:48:36,751 DEBUG [master/2d46b487c067:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:48:36,752 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:48:36,752 INFO [master/2d46b487c067:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2d46b487c067,37233,1733575714217-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:36,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7253cf59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:48:36,841 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2d46b487c067,37233,-1 for getting cluster id 2024-12-07T12:48:36,843 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:48:36,853 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b41e2b7d-ff03-4be2-9172-a81b81a933a0' 2024-12-07T12:48:36,856 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:48:36,856 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b41e2b7d-ff03-4be2-9172-a81b81a933a0" 2024-12-07T12:48:36,857 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45e4a0aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:48:36,857 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2d46b487c067,37233,-1] 2024-12-07T12:48:36,860 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:48:36,862 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:48:36,863 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55584, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:48:36,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39fa7867, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:48:36,867 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:48:36,873 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2d46b487c067,44445,1733575714899, seqNum=-1] 2024-12-07T12:48:36,874 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:48:36,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:48:36,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2d46b487c067,37233,1733575714217 2024-12-07T12:48:36,894 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:43841/hbase 2024-12-07T12:48:36,907 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testNameConflictWhenSplit0 Thread=363, OpenFileDescriptor=607, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=6090 2024-12-07T12:48:36,924 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:36,927 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:36,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:36,932 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-40994054, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-40994054, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:36,946 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-40994054/hregion-40994054.1733575716934, exclude list is [], retry=0 2024-12-07T12:48:36,951 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:36,951 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:36,951 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:36,954 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-40994054/hregion-40994054.1733575716934 2024-12-07T12:48:36,954 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 
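At this point the mini cluster is up (one master, three region servers) and the ResourceChecker marks the start of TestAsyncWALReplay#testNameConflictWhenSplit0. As a rough sketch only, a test of this shape usually brings the cluster up and down with HBaseTestingUtil, the class named in the log; the method names below are assumed to match the older HBaseTestingUtility API and should be verified against the 4.0.0-alpha-1 sources.

```java
// Hypothetical skeleton for a mini-cluster test like the one beginning above.
// Assumes HBaseTestingUtil keeps startMiniCluster(int)/shutdownMiniCluster().
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterSkeleton {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniCluster(3); // the run above starts three region servers
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}
```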
2024-12-07T12:48:36,954 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => e9b609da711f49fb30e33c151d571116, NAME => 'testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:36,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741838_1014 (size=64) 2024-12-07T12:48:36,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741838_1014 (size=64) 2024-12-07T12:48:36,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741838_1014 (size=64) 2024-12-07T12:48:36,970 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:36,972 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,974 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9b609da711f49fb30e33c151d571116 columnFamilyName a 2024-12-07T12:48:36,974 DEBUG [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:36,975 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] regionserver.HStore(327): Store=e9b609da711f49fb30e33c151d571116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:36,975 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,976 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,977 DEBUG 
[Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,977 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,977 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,980 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:36,984 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:36,985 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e9b609da711f49fb30e33c151d571116; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71579322, jitterRate=0.0666150152683258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:48:36,987 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e9b609da711f49fb30e33c151d571116: Writing region info on filesystem at 1733575716970Initializing all the Stores at 1733575716971 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575716971Cleaning up temporary data from old regions at 1733575716977 (+6 ms)Region opened successfully at 1733575716987 (+10 ms) 2024-12-07T12:48:36,987 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e9b609da711f49fb30e33c151d571116, disabling compactions & flushes 2024-12-07T12:48:36,987 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. 2024-12-07T12:48:36,987 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. 2024-12-07T12:48:36,987 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. after waiting 0 ms 2024-12-07T12:48:36,987 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. 2024-12-07T12:48:36,988 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. 
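The region for testReplayEditsWrittenIntoWAL (family 'a') has now been created and closed; the test next writes a few edits for that family into standalone WALs (wal-1 and wal-2 below) and splits them. For orientation, this is roughly what an equivalent edit looks like through the ordinary client API; the test itself appends WAL entries directly rather than going through a Table, and the value bytes here are made up.

```java
// Client-level illustration of the edit replayed below: row "testReplayEditsWrittenIntoWAL",
// family "a", qualifier "1". The Configuration handed in is assumed to point at the cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void writeOne(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testReplayEditsWrittenIntoWAL"))) {
      Put put = new Put(Bytes.toBytes("testReplayEditsWrittenIntoWAL"));
      put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("1"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}
```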
2024-12-07T12:48:36,988 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e9b609da711f49fb30e33c151d571116: Waiting for close lock at 1733575716987Disabling compacts and flushes for region at 1733575716987Disabling writes for close at 1733575716987Writing region close event to WAL at 1733575716988 (+1 ms)Closed at 1733575716988 2024-12-07T12:48:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741837_1013 (size=93) 2024-12-07T12:48:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741837_1013 (size=93) 2024-12-07T12:48:36,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741837_1013 (size=93) 2024-12-07T12:48:36,999 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:36,999 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-40994054:(num 1733575716934) 2024-12-07T12:48:37,001 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T12:48:37,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741839_1015 (size=369) 2024-12-07T12:48:37,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741839_1015 (size=369) 2024-12-07T12:48:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741839_1015 (size=369) 2024-12-07T12:48:37,019 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T12:48:37,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741840_1016 (size=231) 2024-12-07T12:48:37,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741840_1016 (size=231) 2024-12-07T12:48:37,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741840_1016 (size=231) 2024-12-07T12:48:37,052 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1, size=369 (369bytes) 2024-12-07T12:48:37,053 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-07T12:48:37,053 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-07T12:48:37,053 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1 2024-12-07T12:48:37,058 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1 after 3ms 2024-12-07T12:48:37,064 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1 took 13ms 2024-12-07T12:48:37,067 DEBUG [Time-limited 
test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1 so closing down 2024-12-07T12:48:37,070 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-07T12:48:37,072 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp 2024-12-07T12:48:37,072 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:37,073 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:37,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741841_1017 (size=369) 2024-12-07T12:48:37,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741841_1017 (size=369) 2024-12-07T12:48:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741841_1017 (size=369) 2024-12-07T12:48:37,086 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:37,088 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002 2024-12-07T12:48:37,092 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1, size=369, length=369, corrupted=false, cancelled=false 2024-12-07T12:48:37,092 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1, journal: Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1, size=369 (369bytes) at 1733575717052Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1 so closing down at 1733575717067 (+15 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp at 1733575717072 (+5 ms)3 split writer threads finished at 1733575717073 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733575717086 (+13 ms)Rename recovered edits 
hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002 at 1733575717089 (+3 ms)Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-1, size=369, length=369, corrupted=false, cancelled=false at 1733575717092 (+3 ms) 2024-12-07T12:48:37,105 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2, size=231 (231bytes) 2024-12-07T12:48:37,106 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2 2024-12-07T12:48:37,106 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2 after 0ms 2024-12-07T12:48:37,110 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2 took 5ms 2024-12-07T12:48:37,112 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2 so closing down 2024-12-07T12:48:37,112 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:37,115 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-07T12:48:37,116 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002-wal-2.temp 2024-12-07T12:48:37,116 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:37,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741842_1018 (size=231) 2024-12-07T12:48:37,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741842_1018 (size=231) 2024-12-07T12:48:37,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741842_1018 (size=231) 2024-12-07T12:48:37,126 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:37,132 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. 
Deleting hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002-wal-2.temp, length=231 2024-12-07T12:48:37,134 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2, size=231, length=231, corrupted=false, cancelled=false 2024-12-07T12:48:37,134 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2, journal: Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2, size=231 (231bytes) at 1733575717106Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2 so closing down at 1733575717112 (+6 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002-wal-2.temp at 1733575717116 (+4 ms)3 split writer threads finished at 1733575717117 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733575717126 (+9 ms)Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal-2, size=231, length=231, corrupted=false, cancelled=false at 1733575717134 (+8 ms) 2024-12-07T12:48:37,134 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:37,137 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:37,149 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal.1733575717138, exclude list is [], retry=0 2024-12-07T12:48:37,153 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:37,154 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:37,154 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:37,156 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal.1733575717138 2024-12-07T12:48:37,157 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:37,157 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e9b609da711f49fb30e33c151d571116, NAME => 'testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:37,157 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:37,157 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,157 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,159 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,160 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9b609da711f49fb30e33c151d571116 columnFamilyName a 2024-12-07T12:48:37,161 DEBUG [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,161 INFO [StoreOpener-e9b609da711f49fb30e33c151d571116-1 {}] regionserver.HStore(327): Store=e9b609da711f49fb30e33c151d571116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,162 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,163 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,167 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,168 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002 2024-12-07T12:48:37,177 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 
0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002 2024-12-07T12:48:37,182 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e9b609da711f49fb30e33c151d571116 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-07T12:48:37,227 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/.tmp/a/03a52235e28d4c5ea16dc73b845d53e3 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733575717000/Put/seqid=0 2024-12-07T12:48:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741844_1020 (size=5170) 2024-12-07T12:48:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741844_1020 (size=5170) 2024-12-07T12:48:37,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741844_1020 (size=5170) 2024-12-07T12:48:37,239 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/.tmp/a/03a52235e28d4c5ea16dc73b845d53e3 2024-12-07T12:48:37,278 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/.tmp/a/03a52235e28d4c5ea16dc73b845d53e3 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/a/03a52235e28d4c5ea16dc73b845d53e3 2024-12-07T12:48:37,287 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/a/03a52235e28d4c5ea16dc73b845d53e3, entries=2, sequenceid=2, filesize=5.0 K 2024-12-07T12:48:37,292 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for e9b609da711f49fb30e33c151d571116 in 110ms, sequenceid=2, compaction requested=false; wal=null 2024-12-07T12:48:37,293 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/0000000000000000002 2024-12-07T12:48:37,294 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,294 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,297 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e9b609da711f49fb30e33c151d571116 2024-12-07T12:48:37,300 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e9b609da711f49fb30e33c151d571116/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-07T12:48:37,302 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e9b609da711f49fb30e33c151d571116; next sequenceid=3; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69678805, jitterRate=0.03829510509967804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:48:37,303 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e9b609da711f49fb30e33c151d571116: Writing region info on filesystem at 1733575717157Initializing all the Stores at 1733575717159 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717159Obtaining lock to block concurrent updates at 1733575717182 (+23 ms)Preparing flush snapshotting stores in e9b609da711f49fb30e33c151d571116 at 1733575717182Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733575717185 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733575716924.e9b609da711f49fb30e33c151d571116. at 1733575717185Flushing e9b609da711f49fb30e33c151d571116/a: creating writer at 1733575717186 (+1 ms)Flushing e9b609da711f49fb30e33c151d571116/a: appending metadata at 1733575717219 (+33 ms)Flushing e9b609da711f49fb30e33c151d571116/a: closing flushed file at 1733575717221 (+2 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59e9136: reopening flushed file at 1733575717277 (+56 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for e9b609da711f49fb30e33c151d571116 in 110ms, sequenceid=2, compaction requested=false; wal=null at 1733575717292 (+15 ms)Cleaning up temporary data from old regions at 1733575717294 (+2 ms)Region opened successfully at 1733575717303 (+9 ms) 2024-12-07T12:48:37,327 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testNameConflictWhenSplit0 Thread=375 (was 363) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50530 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:57960 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58092 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50512 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34718 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:43841/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34558 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=703 (was 607) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=6077 (was 6090) 2024-12-07T12:48:37,339 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testNameConflictWhenSplit1 Thread=375, OpenFileDescriptor=703, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=6076 2024-12-07T12:48:37,355 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:37,357 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:37,358 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:37,361 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-96488790, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-96488790, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:37,373 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-96488790/hregion-96488790.1733575717362, exclude list is [], retry=0 2024-12-07T12:48:37,377 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:37,377 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 
/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:37,377 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:37,380 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-96488790/hregion-96488790.1733575717362 2024-12-07T12:48:37,380 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:37,381 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => e5467264b2ddd9b7ba97abddd06956e0, NAME => 'testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:37,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741846_1022 (size=64) 2024-12-07T12:48:37,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741846_1022 (size=64) 2024-12-07T12:48:37,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741846_1022 (size=64) 2024-12-07T12:48:37,399 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:37,401 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,403 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e5467264b2ddd9b7ba97abddd06956e0 columnFamilyName a 2024-12-07T12:48:37,403 DEBUG [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,404 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] regionserver.HStore(327): Store=e5467264b2ddd9b7ba97abddd06956e0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,404 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,405 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,405 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,406 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,406 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,408 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,411 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:37,412 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e5467264b2ddd9b7ba97abddd06956e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60527459, jitterRate=-0.09807057678699493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:48:37,413 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e5467264b2ddd9b7ba97abddd06956e0: Writing region info on filesystem at 1733575717399Initializing all the Stores at 1733575717401 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717401Cleaning up temporary data from old regions at 1733575717406 (+5 ms)Region opened successfully at 1733575717412 (+6 ms) 2024-12-07T12:48:37,413 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e5467264b2ddd9b7ba97abddd06956e0, disabling compactions & flushes 2024-12-07T12:48:37,413 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 2024-12-07T12:48:37,413 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 2024-12-07T12:48:37,413 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 
after waiting 0 ms 2024-12-07T12:48:37,413 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 2024-12-07T12:48:37,413 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 2024-12-07T12:48:37,414 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e5467264b2ddd9b7ba97abddd06956e0: Waiting for close lock at 1733575717413Disabling compacts and flushes for region at 1733575717413Disabling writes for close at 1733575717413Writing region close event to WAL at 1733575717413Closed at 1733575717413 2024-12-07T12:48:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741845_1021 (size=93) 2024-12-07T12:48:37,418 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-96488790/hregion-96488790.1733575717362 not finished, retry = 0 2024-12-07T12:48:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741845_1021 (size=93) 2024-12-07T12:48:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741845_1021 (size=93) 2024-12-07T12:48:37,524 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:37,524 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-96488790:(num 1733575717362) 2024-12-07T12:48:37,526 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T12:48:37,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741847_1023 (size=369) 2024-12-07T12:48:37,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741847_1023 (size=369) 2024-12-07T12:48:37,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741847_1023 (size=369) 2024-12-07T12:48:37,541 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-07T12:48:37,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741848_1024 (size=231) 2024-12-07T12:48:37,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741848_1024 (size=231) 2024-12-07T12:48:37,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741848_1024 (size=231) 2024-12-07T12:48:37,567 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2, size=231 (231bytes) 2024-12-07T12:48:37,567 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2 2024-12-07T12:48:37,569 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2 after 1ms 2024-12-07T12:48:37,572 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2 took 5ms 2024-12-07T12:48:37,574 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2 so closing down 2024-12-07T12:48:37,575 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:37,577 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-07T12:48:37,578 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp 2024-12-07T12:48:37,579 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741849_1025 (size=231) 2024-12-07T12:48:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741849_1025 (size=231) 2024-12-07T12:48:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741849_1025 (size=231) 2024-12-07T12:48:37,586 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:37,588 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 2024-12-07T12:48:37,588 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2, size=231, length=231, corrupted=false, cancelled=false 2024-12-07T12:48:37,588 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2, journal: Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2, size=231 (231bytes) at 1733575717567Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2 so closing down at 1733575717575 (+8 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp at 1733575717578 (+3 ms)3 split writer threads finished at 1733575717579 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733575717586 (+7 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 at 1733575717588 (+2 ms)Processed 1 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-2, size=231, length=231, corrupted=false, cancelled=false at 1733575717588 2024-12-07T12:48:37,603 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1, size=369 (369bytes) 2024-12-07T12:48:37,603 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1 2024-12-07T12:48:37,604 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1 after 1ms 2024-12-07T12:48:37,607 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1 took 4ms 2024-12-07T12:48:37,609 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1 so closing down 2024-12-07T12:48:37,609 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:37,611 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-07T12:48:37,612 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp 2024-12-07T12:48:37,613 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:37,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741850_1026 (size=369) 2024-12-07T12:48:37,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741850_1026 (size=369) 2024-12-07T12:48:37,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741850_1026 (size=369) 2024-12-07T12:48:37,622 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:37,628 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. 
It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002, length=231 2024-12-07T12:48:37,630 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 2024-12-07T12:48:37,630 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 22 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1, size=369, length=369, corrupted=false, cancelled=false 2024-12-07T12:48:37,631 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1, journal: Splitting hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1, size=369 (369bytes) at 1733575717603Finishing writing output for hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1 so closing down at 1733575717609 (+6 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp at 1733575717612 (+3 ms)3 split writer threads finished at 1733575717613 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733575717622 (+9 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 at 1733575717630 (+8 ms)Processed 2 edits across 1 Regions in 22 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal-1, size=369, length=369, corrupted=false, cancelled=false at 1733575717630 2024-12-07T12:48:37,631 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:37,633 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:37,644 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal.1733575717633, exclude list is [], retry=0 2024-12-07T12:48:37,647 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:37,648 DEBUG 
[TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:37,648 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:37,650 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal.1733575717633 2024-12-07T12:48:37,651 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:37,651 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e5467264b2ddd9b7ba97abddd06956e0, NAME => 'testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:37,651 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:37,651 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,651 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,653 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,656 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e5467264b2ddd9b7ba97abddd06956e0 columnFamilyName a 2024-12-07T12:48:37,656 DEBUG [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,657 INFO [StoreOpener-e5467264b2ddd9b7ba97abddd06956e0-1 {}] regionserver.HStore(327): Store=e5467264b2ddd9b7ba97abddd06956e0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,657 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,658 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,661 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,662 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 2024-12-07T12:48:37,666 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 2024-12-07T12:48:37,666 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e5467264b2ddd9b7ba97abddd06956e0 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-07T12:48:37,681 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/.tmp/a/1003b7b7f19546ffad656158654f6045 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733575717524/Put/seqid=0 2024-12-07T12:48:37,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741852_1028 (size=5170) 2024-12-07T12:48:37,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741852_1028 (size=5170) 2024-12-07T12:48:37,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741852_1028 (size=5170) 2024-12-07T12:48:37,691 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/.tmp/a/1003b7b7f19546ffad656158654f6045 2024-12-07T12:48:37,701 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/.tmp/a/1003b7b7f19546ffad656158654f6045 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/a/1003b7b7f19546ffad656158654f6045 2024-12-07T12:48:37,710 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/a/1003b7b7f19546ffad656158654f6045, entries=2, sequenceid=2, filesize=5.0 K 2024-12-07T12:48:37,710 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for e5467264b2ddd9b7ba97abddd06956e0 in 44ms, sequenceid=2, compaction requested=false; wal=null 2024-12-07T12:48:37,712 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/0000000000000000002 2024-12-07T12:48:37,712 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,712 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,716 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e5467264b2ddd9b7ba97abddd06956e0 2024-12-07T12:48:37,719 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/e5467264b2ddd9b7ba97abddd06956e0/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-07T12:48:37,721 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e5467264b2ddd9b7ba97abddd06956e0; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71598828, jitterRate=0.066905677318573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:48:37,721 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e5467264b2ddd9b7ba97abddd06956e0: Writing region info on filesystem at 1733575717651Initializing all the Stores at 1733575717653 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717653Obtaining lock to block concurrent updates at 1733575717666 (+13 ms)Preparing flush snapshotting stores in e5467264b2ddd9b7ba97abddd06956e0 at 1733575717666Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733575717666Flushing stores of testReplayEditsWrittenIntoWAL,,1733575717355.e5467264b2ddd9b7ba97abddd06956e0. 
at 1733575717666Flushing e5467264b2ddd9b7ba97abddd06956e0/a: creating writer at 1733575717666Flushing e5467264b2ddd9b7ba97abddd06956e0/a: appending metadata at 1733575717680 (+14 ms)Flushing e5467264b2ddd9b7ba97abddd06956e0/a: closing flushed file at 1733575717680Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49e2ddd3: reopening flushed file at 1733575717699 (+19 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for e5467264b2ddd9b7ba97abddd06956e0 in 44ms, sequenceid=2, compaction requested=false; wal=null at 1733575717710 (+11 ms)Cleaning up temporary data from old regions at 1733575717712 (+2 ms)Region opened successfully at 1733575717721 (+9 ms) 2024-12-07T12:48:37,739 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testNameConflictWhenSplit1 Thread=385 (was 375) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58178 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50512 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34794 [Receiving block 
BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50602 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34558 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:57960 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 703) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=6071 (was 6076) 2024-12-07T12:48:37,750 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testReplayEditsWrittenIntoWAL Thread=385, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=6070 2024-12-07T12:48:37,769 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:37,772 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:37,773 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:37,777 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-15385167, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-15385167, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:37,794 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-15385167/hregion-15385167.1733575717778, exclude list is [], retry=0 2024-12-07T12:48:37,798 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:37,798 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:37,799 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:37,802 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-15385167/hregion-15385167.1733575717778 2024-12-07T12:48:37,802 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:37,802 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 322d6909faf94a81cc9b8c7b3915c0a9, NAME => 'testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741854_1030 (size=64) 2024-12-07T12:48:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741854_1030 (size=64) 2024-12-07T12:48:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741854_1030 (size=64) 2024-12-07T12:48:37,814 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:37,815 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,817 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName a 2024-12-07T12:48:37,817 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,818 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,818 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,819 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName b 2024-12-07T12:48:37,820 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,820 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,820 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,822 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName c 2024-12-07T12:48:37,822 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:37,823 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:37,823 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,824 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,825 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,826 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,826 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,827 DEBUG [Time-limited 
test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:37,828 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:37,831 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:37,832 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 322d6909faf94a81cc9b8c7b3915c0a9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62664152, jitterRate=-0.066231369972229}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:37,832 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 322d6909faf94a81cc9b8c7b3915c0a9: Writing region info on filesystem at 1733575717814Initializing all the Stores at 1733575717815 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717815Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717815Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575717815Cleaning up temporary data from old regions at 1733575717826 (+11 ms)Region opened successfully at 1733575717832 (+6 ms) 2024-12-07T12:48:37,832 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 322d6909faf94a81cc9b8c7b3915c0a9, disabling compactions & flushes 2024-12-07T12:48:37,832 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:37,832 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:37,832 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. after waiting 0 ms 2024-12-07T12:48:37,832 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:37,833 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 
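[Editor's note] The FlushLargeStoresPolicy record above shows the per-family flush lower bound falling back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set, and the subsequent "Opened 322d6909..." record reports flushSizeLowerBound=44739242. A plain-Java sketch of that arithmetic follows; the 128 MB memstore flush size is an assumed default (hbase.hregion.memstore.flush.size), not something read from this log.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size
        int families = 3;                            // column families a, b and c
        long lowerBound = memstoreFlushSize / families;
        System.out.println(lowerBound);                                // 44739242, the flushSizeLowerBound logged above
        System.out.printf("%.1f M%n", lowerBound / (1024.0 * 1024.0)); // 42.7 M, matching the fallback message
      }
    }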
2024-12-07T12:48:37,833 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 322d6909faf94a81cc9b8c7b3915c0a9: Waiting for close lock at 1733575717832Disabling compacts and flushes for region at 1733575717832Disabling writes for close at 1733575717832Writing region close event to WAL at 1733575717833 (+1 ms)Closed at 1733575717833 2024-12-07T12:48:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741853_1029 (size=93) 2024-12-07T12:48:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741853_1029 (size=93) 2024-12-07T12:48:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741853_1029 (size=93) 2024-12-07T12:48:37,840 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:37,840 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-15385167:(num 1733575717778) 2024-12-07T12:48:37,840 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:37,842 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:37,856 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, exclude list is [], retry=0 2024-12-07T12:48:37,859 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:37,859 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:37,859 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:37,861 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 2024-12-07T12:48:37,862 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:38,035 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, size=0 (0bytes) 2024-12-07T12:48:38,035 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 might be still open, length is 0 2024-12-07T12:48:38,036 INFO 
[Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 2024-12-07T12:48:38,036 WARN [IPC Server handler 1 on default port 43841 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-07T12:48:38,037 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 after 1ms 2024-12-07T12:48:39,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50636 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50636 dst: /127.0.0.1:35169 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35169 remote=/127.0.0.1:50636]. Total timeout mills is 60000, 58766 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
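[Editor's note] Attempt 0 of the lease recovery above fails because the WAL writer still holds the file open; the recovery that eventually succeeds (attempt=1, about four seconds later in the records below) goes through the HDFS recoverLease call. A minimal sketch of that underlying call, using only the standard HDFS client API rather than HBase's RecoverLeaseFSUtils, with the retry loop simplified to a fixed poll:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]); // an hdfs:// path to a WAL that may still be open
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal); // asks the NameNode to start lease recovery
        for (int attempt = 1; !recovered && attempt <= 5; attempt++) {
          Thread.sleep(1000L * attempt);           // RecoverLeaseFSUtils uses its own backoff schedule
          recovered = dfs.isFileClosed(wal);       // true once the last block has been finalized
        }
        System.out.println("lease recovered: " + recovered);
      }
    }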
2024-12-07T12:48:39,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58210 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:36341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58210 dst: /127.0.0.1:36341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:39,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34830 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34830 dst: /127.0.0.1:46077 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
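[Editor's note] The three DataXceiver errors above are the write pipeline for blk_1073741855 being interrupted while the lease is recovered; until that block is finalized the NameNode reports only completed data, which is why the splitter saw size=0 for a WAL that, per the block reports just below, actually holds 470675 bytes. A small sketch, using only standard HDFS client calls, for inspecting the visible length and open/closed state of such a file:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLengthCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]);
        FileSystem fs = wal.getFileSystem(conf);
        // An open WAL can report length 0 even though edits have been hflushed,
        // because getFileStatus() only reflects blocks the NameNode has completed.
        System.out.println("visible length: " + fs.getFileStatus(wal).getLen());
        if (fs instanceof DistributedFileSystem) {
          System.out.println("closed: " + ((DistributedFileSystem) fs).isFileClosed(wal));
        }
      }
    }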
2024-12-07T12:48:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741855_1032 (size=470675) 2024-12-07T12:48:39,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741855_1032 (size=470675) 2024-12-07T12:48:39,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741855_1032 (size=470675) 2024-12-07T12:48:42,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T12:48:42,038 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 after 4001ms 2024-12-07T12:48:42,043 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 took 4008ms 2024-12-07T12:48:42,047 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733575717843.temp 2024-12-07T12:48:42,049 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp 2024-12-07T12:48:42,101 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:48:42,144 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843; continuing. 
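[Editor's note] The recovered-edits writer created in the record above uses a zero-padded, 19-digit sequence-id file name with the source WAL name and a .temp suffix; once the split finishes (next records), the temp file is renamed to just the highest sequence id it contains, 0000000000000003002. A plain-Java sketch of that naming convention as it appears in this log (tempName and finalName are illustrative helpers, not HBase methods):

    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsNamingSketch {
      static String tempName(long firstSeqId, String walName) {
        return String.format("%019d", firstSeqId) + "-" + walName + ".temp";
      }

      static String finalName(long maxSeqId) {
        return String.format("%019d", maxSeqId);
      }

      public static void main(String[] args) {
        Path editsDir = new Path("/hbase/data/default/testReplayEditsWrittenIntoWAL/"
            + "322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits");
        // .../recovered.edits/0000000000000000001-wal.1733575717843.temp while the writer is open
        System.out.println(new Path(editsDir, tempName(1, "wal.1733575717843")));
        // .../recovered.edits/0000000000000003002 after the rename
        System.out.println(new Path(editsDir, finalName(3002)));
      }
    }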
2024-12-07T12:48:42,144 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 so closing down 2024-12-07T12:48:42,144 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:42,144 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:42,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741856_1033 (size=470683) 2024-12-07T12:48:42,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741856_1033 (size=470683) 2024-12-07T12:48:42,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741856_1033 (size=470683) 2024-12-07T12:48:42,149 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp (wrote 3002 edits, skipped 0 edits in 33 ms) 2024-12-07T12:48:42,151 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 2024-12-07T12:48:42,151 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 108 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T12:48:42,152 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, journal: Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, size=0 (0bytes) at 1733575718035Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp at 1733575722049 (+4014 ms)Split 1024 edits, skipped 0 edits. at 1733575722107 (+58 ms)Split 2048 edits, skipped 0 edits. 
at 1733575722129 (+22 ms)Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 so closing down at 1733575722144 (+15 ms)3 split writer threads finished at 1733575722145 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp (wrote 3002 edits, skipped 0 edits in 33 ms) at 1733575722149 (+4 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000000001-wal.1733575717843.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 at 1733575722151 (+2 ms)Processed 3002 edits across 1 Regions in 108 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843, size=0, length=0, corrupted=false, cancelled=false at 1733575722151 2024-12-07T12:48:42,154 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575717843 2024-12-07T12:48:42,155 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 2024-12-07T12:48:42,156 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:42,158 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:42,170 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575722158, exclude list is [], retry=0 2024-12-07T12:48:42,173 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:42,173 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:42,174 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:42,176 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575722158 2024-12-07T12:48:42,176 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:42,177 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:42,179 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,180 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName a 2024-12-07T12:48:42,180 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:42,181 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:42,181 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,182 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName b 2024-12-07T12:48:42,182 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:42,183 INFO 
[StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:42,183 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,184 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 322d6909faf94a81cc9b8c7b3915c0a9 columnFamilyName c 2024-12-07T12:48:42,184 DEBUG [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:42,185 INFO [StoreOpener-322d6909faf94a81cc9b8c7b3915c0a9-1 {}] regionserver.HStore(327): Store=322d6909faf94a81cc9b8c7b3915c0a9/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:42,185 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,186 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,188 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,189 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 2024-12-07T12:48:42,222 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T12:48:42,529 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 322d6909faf94a81cc9b8c7b3915c0a9 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T12:48:42,565 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/6cfab1474829491d972d3dbef1d9f376 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733575717868/Put/seqid=0 2024-12-07T12:48:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to 
blk_1073741858_1035 (size=50463) 2024-12-07T12:48:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741858_1035 (size=50463) 2024-12-07T12:48:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741858_1035 (size=50463) 2024-12-07T12:48:42,574 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/6cfab1474829491d972d3dbef1d9f376 2024-12-07T12:48:42,581 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/6cfab1474829491d972d3dbef1d9f376 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/a/6cfab1474829491d972d3dbef1d9f376 2024-12-07T12:48:42,589 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/a/6cfab1474829491d972d3dbef1d9f376, entries=754, sequenceid=754, filesize=49.3 K 2024-12-07T12:48:42,589 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 322d6909faf94a81cc9b8c7b3915c0a9 in 61ms, sequenceid=754, compaction requested=false; wal=null 2024-12-07T12:48:42,604 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T12:48:42,605 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 322d6909faf94a81cc9b8c7b3915c0a9 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T12:48:42,614 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/3780b83886c94ca1a6ae818ef7da0c0f is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733575717911/Put/seqid=0 2024-12-07T12:48:42,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741859_1036 (size=20072) 2024-12-07T12:48:42,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741859_1036 (size=20072) 2024-12-07T12:48:42,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741859_1036 (size=20072) 2024-12-07T12:48:42,624 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/3780b83886c94ca1a6ae818ef7da0c0f 2024-12-07T12:48:42,645 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/bc486bb2d11c4372b5c377fd8fc7396e is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733575717927/Put/seqid=0 2024-12-07T12:48:42,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46077 is added to blk_1073741860_1037 (size=35835) 2024-12-07T12:48:42,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741860_1037 (size=35835) 2024-12-07T12:48:42,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741860_1037 (size=35835) 2024-12-07T12:48:42,653 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/bc486bb2d11c4372b5c377fd8fc7396e 2024-12-07T12:48:42,660 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/a/3780b83886c94ca1a6ae818ef7da0c0f as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/a/3780b83886c94ca1a6ae818ef7da0c0f 2024-12-07T12:48:42,667 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/a/3780b83886c94ca1a6ae818ef7da0c0f, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-07T12:48:42,668 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/bc486bb2d11c4372b5c377fd8fc7396e as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/b/bc486bb2d11c4372b5c377fd8fc7396e 2024-12-07T12:48:42,675 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/b/bc486bb2d11c4372b5c377fd8fc7396e, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-07T12:48:42,675 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 322d6909faf94a81cc9b8c7b3915c0a9 in 70ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-07T12:48:42,684 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T12:48:42,684 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 322d6909faf94a81cc9b8c7b3915c0a9 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-07T12:48:42,690 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/75ca3a14a45b490d987ac39a38c3a075 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733575717941/Put/seqid=0 2024-12-07T12:48:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741861_1038 (size=35082) 2024-12-07T12:48:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741861_1038 (size=35082) 2024-12-07T12:48:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741861_1038 (size=35082) 2024-12-07T12:48:42,703 INFO 
[Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/75ca3a14a45b490d987ac39a38c3a075 2024-12-07T12:48:42,732 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/822665d025ef4f7288054fef43f09c6e is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733575717966/Put/seqid=0 2024-12-07T12:48:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741862_1039 (size=20825) 2024-12-07T12:48:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741862_1039 (size=20825) 2024-12-07T12:48:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741862_1039 (size=20825) 2024-12-07T12:48:42,742 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/822665d025ef4f7288054fef43f09c6e 2024-12-07T12:48:42,749 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/b/75ca3a14a45b490d987ac39a38c3a075 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/b/75ca3a14a45b490d987ac39a38c3a075 2024-12-07T12:48:42,756 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/b/75ca3a14a45b490d987ac39a38c3a075, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-07T12:48:42,758 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/822665d025ef4f7288054fef43f09c6e as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/c/822665d025ef4f7288054fef43f09c6e 2024-12-07T12:48:42,764 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/c/822665d025ef4f7288054fef43f09c6e, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-07T12:48:42,765 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 322d6909faf94a81cc9b8c7b3915c0a9 in 81ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-07T12:48:42,774 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1733575718007/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 
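The HRegion(3140) flush records in this run print both a human-readable size and the exact byte count (for example "dataSize ~42.49 KB/43512" above). As an editorial aside, the throwaway sketch below, which is not part of the test output, parses that exact phrasing and confirms the two figures agree; the regex is an assumption tied to the wording shown here and would need adjusting for any other log format.

```python
import re

# Matches the "Finished flush of dataSize ~<KB> KB/<bytes>" phrasing used by the
# HRegion(3140) records in this log; the pattern is an assumption tied to that wording.
FLUSH_RE = re.compile(r"Finished flush of dataSize ~([\d.]+) KB/(\d+)")

def check_flush_sizes(log_text):
    """Yield (human_kb, exact_bytes, agree) for every flush record found."""
    for human_kb, exact_bytes in FLUSH_RE.findall(log_text):
        kb, b = float(human_kb), int(exact_bytes)
        # The human-readable figure should be the byte count divided by 1024, to two decimals.
        yield kb, b, abs(b / 1024 - kb) < 0.01

sample = "Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984"
print(list(check_flush_sizes(sample)))  # [(42.49, 43512, True)]
```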
2024-12-07T12:48:42,778 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 2024-12-07T12:48:42,779 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-07T12:48:42,779 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 322d6909faf94a81cc9b8c7b3915c0a9 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-07T12:48:42,788 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/8cd390f2fe5549a7aa9fc67cd2e5bdf3 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733575717972/Put/seqid=0 2024-12-07T12:48:42,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741863_1040 (size=50301) 2024-12-07T12:48:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741863_1040 (size=50301) 2024-12-07T12:48:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741863_1040 (size=50301) 2024-12-07T12:48:42,797 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/8cd390f2fe5549a7aa9fc67cd2e5bdf3 2024-12-07T12:48:42,803 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8cd390f2fe5549a7aa9fc67cd2e5bdf3 2024-12-07T12:48:42,804 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/.tmp/c/8cd390f2fe5549a7aa9fc67cd2e5bdf3 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/c/8cd390f2fe5549a7aa9fc67cd2e5bdf3 2024-12-07T12:48:42,811 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8cd390f2fe5549a7aa9fc67cd2e5bdf3 2024-12-07T12:48:42,812 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/c/8cd390f2fe5549a7aa9fc67cd2e5bdf3, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-07T12:48:42,812 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 322d6909faf94a81cc9b8c7b3915c0a9 in 33ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-07T12:48:42,813 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/0000000000000003002 2024-12-07T12:48:42,814 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,814 DEBUG [Time-limited test {}] regionserver.HRegion(1060): 
Cleaning up temporary data for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,815 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:48:42,817 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 322d6909faf94a81cc9b8c7b3915c0a9 2024-12-07T12:48:42,820 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenIntoWAL/322d6909faf94a81cc9b8c7b3915c0a9/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-07T12:48:42,821 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 322d6909faf94a81cc9b8c7b3915c0a9; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68277417, jitterRate=0.01741279661655426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:48:42,821 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 322d6909faf94a81cc9b8c7b3915c0a9: Writing region info on filesystem at 1733575722177Initializing all the Stores at 1733575722178 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722178Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722179 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722179Cleaning up temporary data from old regions at 1733575722814 (+635 ms)Region opened successfully at 1733575722821 (+7 ms) 2024-12-07T12:48:42,894 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 322d6909faf94a81cc9b8c7b3915c0a9, disabling compactions & flushes 2024-12-07T12:48:42,895 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:42,895 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:42,895 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. after waiting 0 ms 2024-12-07T12:48:42,895 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 
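The "Opened 322d6909faf94a81cc9b8c7b3915c0a9" record above prints ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68277417, jitterRate=0.01741279661655426}. As a hedged reading, desiredMaxFileSize looks like a configured base size scaled by (1 + jitterRate); back-solving that assumed relationship for this record and for the analogous test2727 record later in this section both lands on roughly 67,108,864 bytes (64 MiB). The sketch below only does that arithmetic; the formula and the 64 MiB base are inferences, not values stated by the log.

```python
# Back-solve the un-jittered base split size from the two HRegion(1114) "Opened ..."
# records in this log. The relationship desiredMaxFileSize ~= base * (1 + jitterRate)
# is an assumption inferred from the printed values, not something the log states.
records = [
    ("322d6909faf94a81cc9b8c7b3915c0a9", 68277417, 0.01741279661655426),
    ("97c103d3b2dd9284e51fae69a37e8e08", 64705802, -0.035808414220809937),
]
for region, desired, jitter in records:
    base = desired / (1 + jitter)
    print(f"{region}: desiredMaxFileSize={desired} -> base ~= {base:,.0f} bytes")
# Both print a base of about 67,108,864 bytes (64 MiB).
```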
2024-12-07T12:48:42,897 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733575717770.322d6909faf94a81cc9b8c7b3915c0a9. 2024-12-07T12:48:42,898 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 322d6909faf94a81cc9b8c7b3915c0a9: Waiting for close lock at 1733575722894Disabling compacts and flushes for region at 1733575722894Disabling writes for close at 1733575722895 (+1 ms)Writing region close event to WAL at 1733575722897 (+2 ms)Closed at 1733575722897 2024-12-07T12:48:42,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741857_1034 (size=93) 2024-12-07T12:48:42,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741857_1034 (size=93) 2024-12-07T12:48:42,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741857_1034 (size=93) 2024-12-07T12:48:42,906 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:42,906 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733575722158) 2024-12-07T12:48:42,925 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testReplayEditsWrittenIntoWAL Thread=401 (was 385) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:43093 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_714368817_22 at /127.0.0.1:48690 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:38813 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_714368817_22 at /127.0.0.1:48720 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38813 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_714368817_22 at /127.0.0.1:50374 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43093 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:43841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_714368817_22 at /127.0.0.1:39526 [Waiting for operation #15] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:43841 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=865 (was 785) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=6002 (was 6070) 2024-12-07T12:48:42,939 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#test2727 Thread=401, OpenFileDescriptor=865, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=6002 2024-12-07T12:48:42,959 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:42,962 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:42,963 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:42,966 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-00134982, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-00134982, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:42,978 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-00134982/hregion-00134982.1733575722966, exclude list is [], retry=0 2024-12-07T12:48:42,981 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:42,982 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:42,983 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:42,985 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-00134982/hregion-00134982.1733575722966 2024-12-07T12:48:42,985 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:42,986 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 97c103d3b2dd9284e51fae69a37e8e08, NAME => 'test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:42,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741865_1042 (size=43) 2024-12-07T12:48:42,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741865_1042 (size=43) 2024-12-07T12:48:42,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741865_1042 (size=43) 2024-12-07T12:48:42,996 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:42,998 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,000 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName a 2024-12-07T12:48:43,000 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,001 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,001 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,002 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName b 2024-12-07T12:48:43,002 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,003 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,003 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,005 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName c 2024-12-07T12:48:43,005 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,005 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,006 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,006 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,007 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,008 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,008 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,009 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:43,010 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,013 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:43,013 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 97c103d3b2dd9284e51fae69a37e8e08; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64705802, jitterRate=-0.035808414220809937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:43,014 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 97c103d3b2dd9284e51fae69a37e8e08: Writing region info on filesystem at 1733575722996Initializing all the Stores at 1733575722997 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722998 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722998Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575722998Cleaning up temporary data from old regions at 1733575723008 (+10 ms)Region opened successfully at 1733575723014 (+6 ms) 2024-12-07T12:48:43,014 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 97c103d3b2dd9284e51fae69a37e8e08, disabling compactions & flushes 2024-12-07T12:48:43,014 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:43,015 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:43,015 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. after waiting 0 ms 2024-12-07T12:48:43,015 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:43,015 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 
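One more throwaway check, this time on the FlushLargeStoresPolicy record for test2727 above, which falls back to "region.getMemStoreFlushHeapSize/# of families (42.7 M)" and later prints flushSizeLowerBound=44739242. The arithmetic below assumes a 128 MiB memstore flush size divided across the region's three column families (a, b, c); the 128 MiB figure is inferred from the printed numbers, not stated anywhere in the log.

```python
# Sanity-check the test2727 FlushLargeStoresPolicy numbers printed above.
memstore_flush_size = 128 * 1024 * 1024   # 134,217,728 bytes -- inferred, not in the log
families = 3                              # column families a, b and c
lower_bound = memstore_flush_size // families
print(lower_bound)                          # 44739242, matching flushSizeLowerBound=44739242
print(round(lower_bound / 1024 / 1024, 1))  # 42.7, matching the "(42.7 M)" in the record
```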
2024-12-07T12:48:43,015 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 97c103d3b2dd9284e51fae69a37e8e08: Waiting for close lock at 1733575723014Disabling compacts and flushes for region at 1733575723014Disabling writes for close at 1733575723015 (+1 ms)Writing region close event to WAL at 1733575723015Closed at 1733575723015 2024-12-07T12:48:43,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741864_1041 (size=93) 2024-12-07T12:48:43,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741864_1041 (size=93) 2024-12-07T12:48:43,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741864_1041 (size=93) 2024-12-07T12:48:43,021 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:43,021 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-00134982:(num 1733575722966) 2024-12-07T12:48:43,021 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:43,023 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:43,037 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, exclude list is [], retry=0 2024-12-07T12:48:43,040 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:43,041 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:43,041 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:43,043 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 2024-12-07T12:48:43,043 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:43,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741866_1043 (size=344306) 2024-12-07T12:48:43,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741866_1043 (size=344306) 2024-12-07T12:48:43,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741866_1043 
(size=344306) 2024-12-07T12:48:43,181 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, size=336.2 K (344306bytes) 2024-12-07T12:48:43,181 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 2024-12-07T12:48:43,182 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 after 1ms 2024-12-07T12:48:43,186 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 took 5ms 2024-12-07T12:48:43,190 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733575723024.temp 2024-12-07T12:48:43,192 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp 2024-12-07T12:48:43,222 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 so closing down 2024-12-07T12:48:43,222 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:43,222 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:43,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741867_1044 (size=344306) 2024-12-07T12:48:43,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741867_1044 (size=344306) 2024-12-07T12:48:43,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741867_1044 (size=344306) 2024-12-07T12:48:43,226 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp (wrote 3000 edits, skipped 0 edits in 10 ms) 2024-12-07T12:48:43,228 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp to hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 2024-12-07T12:48:43,228 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 41 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, size=336.2 K, length=344306, corrupted=false, cancelled=false 2024-12-07T12:48:43,229 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, journal: Splitting hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, size=336.2 K (344306bytes) at 
1733575723181Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp at 1733575723192 (+11 ms)Split 1024 edits, skipped 0 edits. at 1733575723201 (+9 ms)Split 2048 edits, skipped 0 edits. at 1733575723212 (+11 ms)Finishing writing output for hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 so closing down at 1733575723222 (+10 ms)3 split writer threads finished at 1733575723222Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp (wrote 3000 edits, skipped 0 edits in 10 ms) at 1733575723226 (+4 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000000001-wal.1733575723024.temp to hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 at 1733575723228 (+2 ms)Processed 3000 edits across 1 Regions in 41 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024, size=336.2 K, length=344306, corrupted=false, cancelled=false at 1733575723229 (+1 ms) 2024-12-07T12:48:43,230 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723024 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575723024 2024-12-07T12:48:43,232 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 2024-12-07T12:48:43,232 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:43,234 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:43,248 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, exclude list is [], retry=0 2024-12-07T12:48:43,251 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:43,251 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:43,252 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:43,254 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 2024-12-07T12:48:43,254 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL 
writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:43,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741868_1045 (size=344433) 2024-12-07T12:48:43,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741868_1045 (size=344433) 2024-12-07T12:48:43,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741868_1045 (size=344433) 2024-12-07T12:48:43,413 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, size=336.4 K (344433bytes) 2024-12-07T12:48:43,413 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 2024-12-07T12:48:43,413 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 after 0ms 2024-12-07T12:48:43,416 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 took 4ms 2024-12-07T12:48:43,420 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733575723234.temp 2024-12-07T12:48:43,421 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp 2024-12-07T12:48:43,441 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 so closing down 2024-12-07T12:48:43,442 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:43,442 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:43,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741869_1046 (size=344433) 2024-12-07T12:48:43,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741869_1046 (size=344433) 2024-12-07T12:48:43,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741869_1046 (size=344433) 2024-12-07T12:48:43,449 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp (wrote 3000 edits, skipped 0 edits in 9 ms) 2024-12-07T12:48:43,451 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp to 
hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 2024-12-07T12:48:43,451 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 34 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, size=336.4 K, length=344433, corrupted=false, cancelled=false 2024-12-07T12:48:43,451 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, journal: Splitting hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, size=336.4 K (344433bytes) at 1733575723413Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp at 1733575723421 (+8 ms)Split 1024 edits, skipped 0 edits. at 1733575723425 (+4 ms)Split 2048 edits, skipped 0 edits. at 1733575723434 (+9 ms)Finishing writing output for hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 so closing down at 1733575723442 (+8 ms)3 split writer threads finished at 1733575723442Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp (wrote 3000 edits, skipped 0 edits in 9 ms) at 1733575723449 (+7 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003001-wal.1733575723234.temp to hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 at 1733575723451 (+2 ms)Processed 3000 edits across 1 Regions in 34 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234, size=336.4 K, length=344433, corrupted=false, cancelled=false at 1733575723451 2024-12-07T12:48:43,453 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723234 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575723234 2024-12-07T12:48:43,454 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 2024-12-07T12:48:43,454 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:43,456 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/test2727-manual,16010,1733575722958, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:43,469 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723456, exclude list is [], retry=0 2024-12-07T12:48:43,472 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:43,473 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:43,473 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:43,475 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733575722958/wal.1733575723456 2024-12-07T12:48:43,476 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:43,476 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 97c103d3b2dd9284e51fae69a37e8e08, NAME => 'test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:43,476 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:43,476 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,476 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,478 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,478 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName a 2024-12-07T12:48:43,479 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,479 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,479 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,480 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName b 2024-12-07T12:48:43,480 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,480 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,480 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,481 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97c103d3b2dd9284e51fae69a37e8e08 columnFamilyName c 2024-12-07T12:48:43,481 DEBUG [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:43,481 INFO [StoreOpener-97c103d3b2dd9284e51fae69a37e8e08-1 {}] regionserver.HStore(327): Store=97c103d3b2dd9284e51fae69a37e8e08/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:43,482 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,482 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,484 DEBUG [Time-limited test {}] 
regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:43,485 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 2024-12-07T12:48:43,522 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 2024-12-07T12:48:43,523 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 2024-12-07T12:48:43,561 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 2024-12-07T12:48:43,562 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 97c103d3b2dd9284e51fae69a37e8e08 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-07T12:48:43,589 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/a/e247446e082e446d9de9f5be8b1283cb is 41, key is test2727/a:100/1733575723258/Put/seqid=0 2024-12-07T12:48:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741871_1048 (size=84227) 2024-12-07T12:48:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741871_1048 (size=84227) 2024-12-07T12:48:43,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741871_1048 (size=84227) 2024-12-07T12:48:43,596 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/a/e247446e082e446d9de9f5be8b1283cb 2024-12-07T12:48:43,626 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/b/aef79f717a074d6bac8b51f847c18ec7 is 41, key is test2727/b:100/1733575723308/Put/seqid=0 2024-12-07T12:48:43,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741872_1049 (size=84609) 2024-12-07T12:48:43,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741872_1049 (size=84609) 2024-12-07T12:48:43,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741872_1049 (size=84609) 2024-12-07T12:48:44,033 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/b/aef79f717a074d6bac8b51f847c18ec7 
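The replay above applies the two recovered-edits files in ascending order of the sequence id encoded in their file names (0000000000000003000, then 0000000000000006000); the split writer earlier renamed its 0000000000000003001-wal.1733575723234.temp output to the final name once the highest contained sequence id (6000) was known. Below is a minimal sketch of that naming and ordering convention in plain Java; the RecoveredEditsOrder class and the maxFlushedSeqId threshold are illustrative assumptions, not HBase's actual WALSplitUtil/HRegion code.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

/** Sketch: order recovered.edits files by the sequence id encoded in their names. */
public class RecoveredEditsOrder {
    /** Parse the leading sequence id from a name such as
     *  "0000000000000006000" or "0000000000000003001-wal.1733575723234.temp". */
    static long seqIdOf(String fileName) {
        int dash = fileName.indexOf('-');
        String digits = dash >= 0 ? fileName.substring(0, dash) : fileName;
        return Long.parseLong(digits);
    }

    public static void main(String[] args) {
        List<String> files = new ArrayList<>(List.of(
                "0000000000000006000", "0000000000000003000"));
        long maxFlushedSeqId = 1; // hypothetical: edits at or below this are already persisted

        files.sort(Comparator.comparingLong(RecoveredEditsOrder::seqIdOf));
        for (String f : files) {
            if (seqIdOf(f) <= maxFlushedSeqId) {
                System.out.println("skip   " + f + " (already flushed)");
            } else {
                System.out.println("replay " + f);
            }
        }
    }
}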
2024-12-07T12:48:44,060 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/c/ae383ffcc02942ffbe8df04a3681c03c is 41, key is test2727/c:100/1733575723356/Put/seqid=0 2024-12-07T12:48:44,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741873_1050 (size=84609) 2024-12-07T12:48:44,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741873_1050 (size=84609) 2024-12-07T12:48:44,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741873_1050 (size=84609) 2024-12-07T12:48:44,067 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/c/ae383ffcc02942ffbe8df04a3681c03c 2024-12-07T12:48:44,073 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/a/e247446e082e446d9de9f5be8b1283cb as hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/a/e247446e082e446d9de9f5be8b1283cb 2024-12-07T12:48:44,078 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/a/e247446e082e446d9de9f5be8b1283cb, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-07T12:48:44,080 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/b/aef79f717a074d6bac8b51f847c18ec7 as hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/b/aef79f717a074d6bac8b51f847c18ec7 2024-12-07T12:48:44,085 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/b/aef79f717a074d6bac8b51f847c18ec7, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-07T12:48:44,086 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/.tmp/c/ae383ffcc02942ffbe8df04a3681c03c as hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/c/ae383ffcc02942ffbe8df04a3681c03c 2024-12-07T12:48:44,092 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/c/ae383ffcc02942ffbe8df04a3681c03c, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-07T12:48:44,092 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 97c103d3b2dd9284e51fae69a37e8e08 in 531ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-07T12:48:44,093 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000003000 2024-12-07T12:48:44,094 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted 
recovered.edits file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/0000000000000006000 2024-12-07T12:48:44,095 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:44,095 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:44,095 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:44,097 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 97c103d3b2dd9284e51fae69a37e8e08 2024-12-07T12:48:44,099 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/test2727/97c103d3b2dd9284e51fae69a37e8e08/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-07T12:48:44,099 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 97c103d3b2dd9284e51fae69a37e8e08; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68209286, jitterRate=0.016397565603256226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:44,100 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 97c103d3b2dd9284e51fae69a37e8e08: Writing region info on filesystem at 1733575723476Initializing all the Stores at 1733575723477 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575723477Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575723477Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575723477Obtaining lock to block concurrent updates at 1733575723562 (+85 ms)Preparing flush snapshotting stores in 97c103d3b2dd9284e51fae69a37e8e08 at 1733575723562Finished memstore snapshotting test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733575723562Flushing stores of test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 
at 1733575723562Flushing 97c103d3b2dd9284e51fae69a37e8e08/a: creating writer at 1733575723562Flushing 97c103d3b2dd9284e51fae69a37e8e08/a: appending metadata at 1733575723588 (+26 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/a: closing flushed file at 1733575723589 (+1 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/b: creating writer at 1733575723603 (+14 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/b: appending metadata at 1733575723624 (+21 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/b: closing flushed file at 1733575723624Flushing 97c103d3b2dd9284e51fae69a37e8e08/c: creating writer at 1733575724045 (+421 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/c: appending metadata at 1733575724059 (+14 ms)Flushing 97c103d3b2dd9284e51fae69a37e8e08/c: closing flushed file at 1733575724060 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@659ddd1: reopening flushed file at 1733575724071 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14e29f44: reopening flushed file at 1733575724079 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@446d0fae: reopening flushed file at 1733575724085 (+6 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 97c103d3b2dd9284e51fae69a37e8e08 in 531ms, sequenceid=6000, compaction requested=false; wal=null at 1733575724092 (+7 ms)Cleaning up temporary data from old regions at 1733575724095 (+3 ms)Region opened successfully at 1733575724100 (+5 ms) 2024-12-07T12:48:44,101 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-07T12:48:44,101 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 97c103d3b2dd9284e51fae69a37e8e08, disabling compactions & flushes 2024-12-07T12:48:44,102 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:44,102 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:44,102 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. after waiting 0 ms 2024-12-07T12:48:44,102 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 2024-12-07T12:48:44,103 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733575722960.97c103d3b2dd9284e51fae69a37e8e08. 
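The flush above follows a write-to-.tmp-then-rename pattern: each flushed store file is written under the region's .tmp directory, then committed into its column-family directory; the replayed recovered.edits files are deleted and a 6000.seqid marker records the new max sequence id, so the region reopens at sequenceid=6001. A rough local-filesystem sketch of that commit pattern follows; TmpCommitSketch, the placeholder file contents, and the use of java.nio.file stand in for the HDFS operations in the log and are assumptions for illustration only.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

/** Sketch of the "write to .tmp, then rename into the store" pattern seen in the flush log. */
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region");          // stand-in for the region dir on HDFS
        Path tmpFile   = Files.createDirectories(regionDir.resolve(".tmp/a"))
                              .resolve("e247446e082e446d9de9f5be8b1283cb");
        Files.writeString(tmpFile, "hfile-bytes-placeholder");         // the flushed store file

        Path storeFile = Files.createDirectories(regionDir.resolve("a"))
                              .resolve(tmpFile.getFileName());
        // Commit: an atomic rename makes the new store file visible in a single step.
        Files.move(tmpFile, storeFile, StandardCopyOption.ATOMIC_MOVE);

        long maxSeqIdInFlush = 6000;                                   // from the log above
        System.out.println("committed " + storeFile + "; next sequenceid=" + (maxSeqIdInFlush + 1));
    }
}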
2024-12-07T12:48:44,103 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 97c103d3b2dd9284e51fae69a37e8e08: Waiting for close lock at 1733575724101Disabling compacts and flushes for region at 1733575724101Disabling writes for close at 1733575724102 (+1 ms)Writing region close event to WAL at 1733575724103 (+1 ms)Closed at 1733575724103 2024-12-07T12:48:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741870_1047 (size=93) 2024-12-07T12:48:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741870_1047 (size=93) 2024-12-07T12:48:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741870_1047 (size=93) 2024-12-07T12:48:44,108 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:44,108 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733575723456) 2024-12-07T12:48:44,120 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#test2727 Thread=403 (was 401) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:48838 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50508 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:39580 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=927 (was 865) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 198), ProcessCount=11 (was 11), AvailableMemoryMB=5814 (was 6002) 2024-12-07T12:48:44,130 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testSequentialEditLogSeqNum Thread=403, OpenFileDescriptor=927, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=5813 2024-12-07T12:48:44,142 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:44,148 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:44,149 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733575724148 2024-12-07T12:48:44,155 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 2024-12-07T12:48:44,157 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:44,158 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2711c1d9ed2671b8f817e0aaacb9cc0e, NAME => 'testSequentialEditLogSeqNum,,1733575724143.2711c1d9ed2671b8f817e0aaacb9cc0e.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:44,158 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733575724143.2711c1d9ed2671b8f817e0aaacb9cc0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:44,158 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,158 DEBUG 
[Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,159 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e doesn't exist for region: 2711c1d9ed2671b8f817e0aaacb9cc0e on table testSequentialEditLogSeqNum 2024-12-07T12:48:44,160 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 2711c1d9ed2671b8f817e0aaacb9cc0e on table testSequentialEditLogSeqNum 2024-12-07T12:48:44,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741875_1052 (size=62) 2024-12-07T12:48:44,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741875_1052 (size=62) 2024-12-07T12:48:44,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741875_1052 (size=62) 2024-12-07T12:48:44,170 INFO [StoreOpener-2711c1d9ed2671b8f817e0aaacb9cc0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,172 INFO [StoreOpener-2711c1d9ed2671b8f817e0aaacb9cc0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2711c1d9ed2671b8f817e0aaacb9cc0e columnFamilyName a 2024-12-07T12:48:44,172 DEBUG [StoreOpener-2711c1d9ed2671b8f817e0aaacb9cc0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,172 INFO [StoreOpener-2711c1d9ed2671b8f817e0aaacb9cc0e-1 {}] regionserver.HStore(327): Store=2711c1d9ed2671b8f817e0aaacb9cc0e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,173 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,173 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,174 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,174 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,174 DEBUG 
[Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,176 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2711c1d9ed2671b8f817e0aaacb9cc0e 2024-12-07T12:48:44,178 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:44,179 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2711c1d9ed2671b8f817e0aaacb9cc0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63071093, jitterRate=-0.060167476534843445}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:48:44,180 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2711c1d9ed2671b8f817e0aaacb9cc0e: Writing region info on filesystem at 1733575724158Initializing all the Stores at 1733575724170 (+12 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724170Cleaning up temporary data from old regions at 1733575724174 (+4 ms)Region opened successfully at 1733575724180 (+6 ms) 2024-12-07T12:48:44,192 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2711c1d9ed2671b8f817e0aaacb9cc0e 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-07T12:48:44,212 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/.tmp/a/a257a4949cf948d7a4b1fcaa92ea92dd is 81, key is testSequentialEditLogSeqNum/a:x0/1733575724180/Put/seqid=0 2024-12-07T12:48:44,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741876_1053 (size=5833) 2024-12-07T12:48:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741876_1053 (size=5833) 2024-12-07T12:48:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741876_1053 (size=5833) 2024-12-07T12:48:44,219 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/.tmp/a/a257a4949cf948d7a4b1fcaa92ea92dd 2024-12-07T12:48:44,227 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/.tmp/a/a257a4949cf948d7a4b1fcaa92ea92dd as hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/a/a257a4949cf948d7a4b1fcaa92ea92dd 2024-12-07T12:48:44,234 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/a/a257a4949cf948d7a4b1fcaa92ea92dd, 
entries=10, sequenceid=13, filesize=5.7 K 2024-12-07T12:48:44,236 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 2711c1d9ed2671b8f817e0aaacb9cc0e in 43ms, sequenceid=13, compaction requested=false 2024-12-07T12:48:44,236 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2711c1d9ed2671b8f817e0aaacb9cc0e: 2024-12-07T12:48:44,243 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:48:44,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:48:44,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:48:44,243 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:48:44,243 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:48:44,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741874_1051 (size=3017) 2024-12-07T12:48:44,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741874_1051 (size=3017) 2024-12-07T12:48:44,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741874_1051 (size=3017) 2024-12-07T12:48:44,260 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148, size=2.9 K (3017bytes) 2024-12-07T12:48:44,260 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 2024-12-07T12:48:44,261 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 after 1ms 2024-12-07T12:48:44,263 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 took 3ms 2024-12-07T12:48:44,265 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 so closing down 2024-12-07T12:48:44,265 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:44,266 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733575724148.temp 2024-12-07T12:48:44,267 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp 2024-12-07T12:48:44,268 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741877_1054 (size=2433) 2024-12-07T12:48:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741877_1054 (size=2433) 2024-12-07T12:48:44,274 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741877_1054 (size=2433) 2024-12-07T12:48:44,274 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:44,275 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp to hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000020 2024-12-07T12:48:44,275 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148, size=2.9 K, length=3017, corrupted=false, cancelled=false 2024-12-07T12:48:44,275 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148, journal: Splitting hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148, size=2.9 K (3017bytes) at 1733575724260Finishing writing output for hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148 so closing down at 1733575724265 (+5 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp at 1733575724267 (+2 ms)3 split writer threads finished at 1733575724268 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733575724274 (+6 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000003-wal.1733575724148.temp to hdfs://localhost:43841/hbase/data/default/testSequentialEditLogSeqNum/2711c1d9ed2671b8f817e0aaacb9cc0e/recovered.edits/0000000000000000020 at 1733575724275 (+1 ms)Processed 17 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733575724142/wal.1733575724148, size=2.9 K, length=3017, corrupted=false, cancelled=false at 1733575724275 2024-12-07T12:48:44,288 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testSequentialEditLogSeqNum Thread=408 (was 403) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:39580 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=963 (was 927) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 198), ProcessCount=11 (was 11), AvailableMemoryMB=5795 (was 5813) 2024-12-07T12:48:44,301 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testRegionMadeOfBulkLoadedFilesOnly Thread=408, OpenFileDescriptor=963, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=5791 2024-12-07T12:48:44,317 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:44,320 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:44,342 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:44,347 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-01231351, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-01231351, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:44,362 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-01231351/hregion-01231351.1733575724348, exclude list is [], retry=0 2024-12-07T12:48:44,365 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:44,365 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:44,365 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:44,368 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-01231351/hregion-01231351.1733575724348 2024-12-07T12:48:44,368 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:44,368 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 45f8d5536cd5a4c5f6a205e89e0a1c60, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:44,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741879_1056 (size=70) 2024-12-07T12:48:44,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741879_1056 (size=70) 2024-12-07T12:48:44,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741879_1056 (size=70) 2024-12-07T12:48:44,378 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:44,379 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,381 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName a 2024-12-07T12:48:44,381 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,381 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,381 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,383 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName b 2024-12-07T12:48:44,383 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,384 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,385 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,387 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName c 2024-12-07T12:48:44,387 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,388 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,388 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,389 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,389 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,391 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,391 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 45f8d5536cd5a4c5f6a205e89e0a1c60 
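The CompactionConfiguration entries above print the effective compaction tuning for each store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). As a hedged sketch only: these numbers correspond to standard HBase configuration keys, and the snippet below merely restates the logged values; it is not part of the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keys assumed to map to the values printed by CompactionConfiguration above:
        // minFilesToCompact / maxFilesToCompact / ratio / off-peak ratio.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
    }
}
```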
2024-12-07T12:48:44,391 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:44,392 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,395 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:44,395 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 45f8d5536cd5a4c5f6a205e89e0a1c60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63871353, jitterRate=-0.04824267327785492}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:44,396 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: Writing region info on filesystem at 1733575724378Initializing all the Stores at 1733575724379 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724379Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724379Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724379Cleaning up temporary data from old regions at 1733575724391 (+12 ms)Region opened successfully at 1733575724396 (+5 ms) 2024-12-07T12:48:44,396 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 45f8d5536cd5a4c5f6a205e89e0a1c60, disabling compactions & flushes 2024-12-07T12:48:44,396 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:44,396 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:44,396 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. after waiting 0 ms 2024-12-07T12:48:44,396 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 
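The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to memstoreFlushSize divided by the number of families (42.7 M). A minimal sketch, assuming the standard TableDescriptorBuilder.setValue API, of setting that bound explicitly; the table name, family and size below are illustrative, not taken from the test setup.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        // Set the per-column-family flush lower bound directly in the table
        // descriptor, so FlushLargeStoresPolicy does not fall back to
        // memstoreFlushSize / numberOfFamilies as seen in the log above.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024)) // illustrative 16 MB bound
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("a")))
            .build();
        System.out.println(desc.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
    }
}
```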
2024-12-07T12:48:44,397 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:44,397 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: Waiting for close lock at 1733575724396Disabling compacts and flushes for region at 1733575724396Disabling writes for close at 1733575724396Writing region close event to WAL at 1733575724397 (+1 ms)Closed at 1733575724397 2024-12-07T12:48:44,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741878_1055 (size=93) 2024-12-07T12:48:44,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741878_1055 (size=93) 2024-12-07T12:48:44,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741878_1055 (size=93) 2024-12-07T12:48:44,403 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:44,403 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-01231351:(num 1733575724348) 2024-12-07T12:48:44,403 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:44,405 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:44,418 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, exclude list is [], retry=0 2024-12-07T12:48:44,422 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:44,422 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:44,422 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:44,425 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 2024-12-07T12:48:44,426 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:44,426 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 45f8d5536cd5a4c5f6a205e89e0a1c60, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.', STARTKEY => '', ENDKEY => ''} 
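The "WAL configuration" entry above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the newly created AsyncFSWAL. As a hedged sketch: these typically derive from the configuration keys below, with rollsize being blocksize times the roll multiplier (256 MB × 0.5 = 128 MB, which matches the log). The key names are standard HBase settings assumed here, not values read from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size on HDFS; the log above shows 256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll the WAL once it reaches blocksize * multiplier (0.5 -> 128 MB rollsize).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on the number of WAL files; maxLogs=32 in the log.
        conf.setInt("hbase.regionserver.maxlogs", 32);
    }
}
```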
2024-12-07T12:48:44,426 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:44,426 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,427 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,429 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,430 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName a 2024-12-07T12:48:44,430 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,431 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,431 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,432 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName b 2024-12-07T12:48:44,432 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,432 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,432 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,433 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName c 2024-12-07T12:48:44,434 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:44,434 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:44,434 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,435 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,437 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,438 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,438 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,439 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-07T12:48:44,441 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:44,442 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 45f8d5536cd5a4c5f6a205e89e0a1c60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74411430, jitterRate=0.1088167130947113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:44,443 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: Writing region info on filesystem at 1733575724427Initializing all the Stores at 1733575724428 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724428Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724428Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575724428Cleaning up temporary data from old regions at 1733575724438 (+10 ms)Region opened successfully at 1733575724443 (+5 ms) 2024-12-07T12:48:44,449 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733575724448/Put/seqid=0 2024-12-07T12:48:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741881_1058 (size=4826) 2024-12-07T12:48:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741881_1058 (size=4826) 2024-12-07T12:48:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741881_1058 (size=4826) 2024-12-07T12:48:44,458 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43841/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 45f8d5536cd5a4c5f6a205e89e0a1c60/a 2024-12-07T12:48:44,465 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-07T12:48:44,465 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T12:48:44,465 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: 2024-12-07T12:48:44,467 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as 
hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/70b36763d30341a6a078abfa324e711f_SeqId_3_ 2024-12-07T12:48:44,468 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43841/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 45f8d5536cd5a4c5f6a205e89e0a1c60/a as hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/70b36763d30341a6a078abfa324e711f_SeqId_3_ - updating store file list. 2024-12-07T12:48:44,475 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 70b36763d30341a6a078abfa324e711f_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-07T12:48:44,475 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/70b36763d30341a6a078abfa324e711f_SeqId_3_ into 45f8d5536cd5a4c5f6a205e89e0a1c60/a 2024-12-07T12:48:44,475 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43841/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 45f8d5536cd5a4c5f6a205e89e0a1c60/a (new location: hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/70b36763d30341a6a078abfa324e711f_SeqId_3_) 2024-12-07T12:48:44,524 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, size=0 (0bytes) 2024-12-07T12:48:44,524 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 might be still open, length is 0 2024-12-07T12:48:44,524 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 2024-12-07T12:48:44,525 WARN [IPC Server handler 4 on default port 43841 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 has not been closed. Lease recovery is in progress. 
RecoveryId = 1059 for block blk_1073741880_1057 2024-12-07T12:48:44,525 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 after 1ms 2024-12-07T12:48:44,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:48:44,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:44,539 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-07T12:48:44,539 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:44,540 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:48:44,540 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T12:48:44,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T12:48:44,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:44,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-07T12:48:44,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:44,543 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-07T12:48:44,543 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:45,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50614 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50614 dst: /127.0.0.1:46077 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46077 remote=/127.0.0.1:50614]. 
Total timeout mills is 60000, 59255 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:45,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:48920 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:36341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48920 dst: /127.0.0.1:36341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:45,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:39702 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39702 dst: /127.0.0.1:35169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:45,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741880_1059 (size=504) 2024-12-07T12:48:45,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741880_1059 (size=504) 2024-12-07T12:48:48,526 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 after 4002ms 2024-12-07T12:48:48,531 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 took 4007ms 2024-12-07T12:48:48,533 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406; continuing. 
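Between the "Recover lease on dfs file" message and the "Recovered lease, attempt=1 ... after 4002ms" message above, the splitter has to wait for HDFS to close the still-open WAL before it can be read. A minimal sketch of that pattern using the public DistributedFileSystem.recoverLease(Path) call; the polling interval and timeout here are illustrative and not the values RecoverLeaseFSUtils actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Poll recoverLease() until the NameNode reports the file closed, or give up.
    static void recoverLease(Configuration conf, Path walFile) throws Exception {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + 60_000; // illustrative timeout
        // recoverLease() returns true once the file is closed and safe to read.
        while (!dfs.recoverLease(walFile) && System.currentTimeMillis() < deadline) {
            Thread.sleep(1000);
        }
    }
}
```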
2024-12-07T12:48:48,533 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 so closing down 2024-12-07T12:48:48,534 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:48,536 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733575724406.temp 2024-12-07T12:48:48,537 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp 2024-12-07T12:48:48,537 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:48,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741882_1060 (size=238) 2024-12-07T12:48:48,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741882_1060 (size=238) 2024-12-07T12:48:48,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741882_1060 (size=238) 2024-12-07T12:48:48,546 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:48,548 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp to hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 2024-12-07T12:48:48,548 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 16 ms; skipped=1; WAL=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T12:48:48,548 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, journal: Splitting hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, size=0 (0bytes) at 1733575724524Finishing writing output for hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 so closing down at 1733575728533 (+4009 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp at 1733575728537 (+4 ms)3 split writer threads finished at 1733575728537Closed recovered edits writer 
path=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733575728546 (+9 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005-wal.1733575724406.temp to hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 at 1733575728548 (+2 ms)Processed 2 edits across 1 Regions in 16 ms; skipped=1; WAL=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406, size=0, length=0, corrupted=false, cancelled=false at 1733575728548 2024-12-07T12:48:48,550 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575724406 2024-12-07T12:48:48,551 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 2024-12-07T12:48:48,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:48,553 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:48,566 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575728553, exclude list is [], retry=0 2024-12-07T12:48:48,568 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:48,569 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:48,569 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:48,571 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575728553 2024-12-07T12:48:48,572 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:48,572 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 45f8d5536cd5a4c5f6a205e89e0a1c60, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:48,572 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:48,572 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,572 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,574 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,575 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName a 2024-12-07T12:48:48,575 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:48,605 DEBUG [StoreFileOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 70b36763d30341a6a078abfa324e711f_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-07T12:48:48,606 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/70b36763d30341a6a078abfa324e711f_SeqId_3_ 2024-12-07T12:48:48,606 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:48,606 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,607 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName b 2024-12-07T12:48:48,607 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:48,608 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:48,608 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,609 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45f8d5536cd5a4c5f6a205e89e0a1c60 columnFamilyName c 2024-12-07T12:48:48,609 DEBUG [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:48,610 INFO [StoreOpener-45f8d5536cd5a4c5f6a205e89e0a1c60-1 {}] regionserver.HStore(327): Store=45f8d5536cd5a4c5f6a205e89e0a1c60/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:48,610 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,611 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,612 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,613 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 2024-12-07T12:48:48,616 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 2024-12-07T12:48:48,616 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 45f8d5536cd5a4c5f6a205e89e0a1c60 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-07T12:48:48,643 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/.tmp/a/1acbf02c9bd547449639526ee231565a is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733575724480/Put/seqid=0 2024-12-07T12:48:48,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741884_1062 (size=5149) 2024-12-07T12:48:48,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741884_1062 (size=5149) 2024-12-07T12:48:48,650 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/.tmp/a/1acbf02c9bd547449639526ee231565a 2024-12-07T12:48:48,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741884_1062 (size=5149) 2024-12-07T12:48:48,656 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/.tmp/a/1acbf02c9bd547449639526ee231565a as hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/1acbf02c9bd547449639526ee231565a 2024-12-07T12:48:48,662 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/a/1acbf02c9bd547449639526ee231565a, entries=1, sequenceid=5, filesize=5.0 K 2024-12-07T12:48:48,662 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 45f8d5536cd5a4c5f6a205e89e0a1c60 in 46ms, sequenceid=5, compaction requested=false; wal=null 2024-12-07T12:48:48,663 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/0000000000000000005 2024-12-07T12:48:48,664 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,664 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,664 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) 
instead. 2024-12-07T12:48:48,666 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 45f8d5536cd5a4c5f6a205e89e0a1c60 2024-12-07T12:48:48,668 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/45f8d5536cd5a4c5f6a205e89e0a1c60/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-07T12:48:48,669 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 45f8d5536cd5a4c5f6a205e89e0a1c60; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60361063, jitterRate=-0.10055007040500641}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:48,670 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: Writing region info on filesystem at 1733575728572Initializing all the Stores at 1733575728573 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575728573Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575728574 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575728574Obtaining lock to block concurrent updates at 1733575728616 (+42 ms)Preparing flush snapshotting stores in 45f8d5536cd5a4c5f6a205e89e0a1c60 at 1733575728616Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733575728616Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 
at 1733575728616Flushing 45f8d5536cd5a4c5f6a205e89e0a1c60/a: creating writer at 1733575728616Flushing 45f8d5536cd5a4c5f6a205e89e0a1c60/a: appending metadata at 1733575728642 (+26 ms)Flushing 45f8d5536cd5a4c5f6a205e89e0a1c60/a: closing flushed file at 1733575728642Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bd486b1: reopening flushed file at 1733575728656 (+14 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 45f8d5536cd5a4c5f6a205e89e0a1c60 in 46ms, sequenceid=5, compaction requested=false; wal=null at 1733575728662 (+6 ms)Cleaning up temporary data from old regions at 1733575728664 (+2 ms)Region opened successfully at 1733575728669 (+5 ms) 2024-12-07T12:48:48,674 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 45f8d5536cd5a4c5f6a205e89e0a1c60, disabling compactions & flushes 2024-12-07T12:48:48,674 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:48,674 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:48,674 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. after waiting 0 ms 2024-12-07T12:48:48,674 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 2024-12-07T12:48:48,675 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733575724318.45f8d5536cd5a4c5f6a205e89e0a1c60. 
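The flush journal above shows the single replayed edit being written from the memstore to a new HFile (1acbf02c..., 5.0 K) and added to store a before the recovered.edits file is deleted. That flush is driven internally during region open; a client can request a comparable flush through the public Admin API, as in this hedged sketch (the connection settings and table name are illustrative).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Flush every memstore of the table to HFiles, comparable to the
            // replay-time flush recorded in the journal above.
            admin.flush(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"));
        }
    }
}
```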
2024-12-07T12:48:48,675 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 45f8d5536cd5a4c5f6a205e89e0a1c60: Waiting for close lock at 1733575728674Disabling compacts and flushes for region at 1733575728674Disabling writes for close at 1733575728674Writing region close event to WAL at 1733575728675 (+1 ms)Closed at 1733575728675 2024-12-07T12:48:48,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741883_1061 (size=93) 2024-12-07T12:48:48,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741883_1061 (size=93) 2024-12-07T12:48:48,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741883_1061 (size=93) 2024-12-07T12:48:48,680 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:48,680 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733575728553) 2024-12-07T12:48:48,692 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testRegionMadeOfBulkLoadedFilesOnly Thread=409 (was 408) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:43841 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-227207161_22 at /127.0.0.1:39724 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-227207161_22 at /127.0.0.1:50644 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:43841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1022 (was 963) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 198), ProcessCount=11 (was 11), AvailableMemoryMB=5685 (was 5791) 2024-12-07T12:48:48,704 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterRegionMovedWithMultiCF Thread=409, OpenFileDescriptor=1022, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=5684 2024-12-07T12:48:48,721 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:48,725 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:48:48,729 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2d46b487c067,37233,1733575714217 2024-12-07T12:48:48,732 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@30ce3b49 2024-12-07T12:48:48,733 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:48:48,735 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T12:48:48,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:48:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-07T12:48:48,746 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:48:48,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-07T12:48:48,748 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:48,750 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:48:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:48:48,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741885_1063 (size=694) 2024-12-07T12:48:48,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741885_1063 (size=694) 2024-12-07T12:48:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741885_1063 (size=694) 2024-12-07T12:48:48,763 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23 2024-12-07T12:48:48,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741886_1064 (size=77) 2024-12-07T12:48:48,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741886_1064 (size=77) 2024-12-07T12:48:48,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741886_1064 (size=77) 2024-12-07T12:48:48,771 DEBUG 
[RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:48,772 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:48,772 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:48,772 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:48,772 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:48,772 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:48,772 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:48,772 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575728771Disabling compacts and flushes for region at 1733575728771Disabling writes for close at 1733575728772 (+1 ms)Writing region close event to WAL at 1733575728772Closed at 1733575728772 2024-12-07T12:48:48,773 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:48:48,780 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733575728773"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733575728773"}]},"ts":"1733575728773"} 2024-12-07T12:48:48,783 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T12:48:48,785 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:48:48,787 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733575728785"}]},"ts":"1733575728785"} 2024-12-07T12:48:48,791 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-07T12:48:48,792 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2d46b487c067=0} racks are {/default-rack=0} 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T12:48:48,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T12:48:48,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T12:48:48,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T12:48:48,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T12:48:48,794 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T12:48:48,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN}] 2024-12-07T12:48:48,797 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN 2024-12-07T12:48:48,798 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN; state=OFFLINE, location=2d46b487c067,39027,1733575714856; forceNewPlan=false, retain=false 2024-12-07T12:48:48,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:48:48,954 INFO [2d46b487c067:37233 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-07T12:48:48,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPENING, regionLocation=2d46b487c067,39027,1733575714856 2024-12-07T12:48:48,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN because future has completed 2024-12-07T12:48:48,960 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856}] 2024-12-07T12:48:49,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:48:49,114 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:48:49,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36423, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:48:49,127 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,127 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:49,128 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,128 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:49,128 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,128 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,130 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,132 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:49,132 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,133 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,133 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,134 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:49,135 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,135 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,135 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,136 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,137 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,138 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,138 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,139 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-07T12:48:49,141 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,144 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:49,144 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 05976781667afccaf4cfd2929edf2476; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65022229, jitterRate=-0.031093284487724304}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T12:48:49,145 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,146 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 05976781667afccaf4cfd2929edf2476: Running coprocessor pre-open hook at 1733575729128Writing region info on filesystem at 1733575729128Initializing all the Stores at 1733575729130 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575729130Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575729130Cleaning up temporary data from old regions at 1733575729138 (+8 ms)Running coprocessor post-open hooks at 1733575729145 (+7 ms)Region opened successfully at 1733575729146 (+1 ms) 2024-12-07T12:48:49,148 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., pid=6, masterSystemTime=1733575729113 2024-12-07T12:48:49,151 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,151 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,152 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPEN, openSeqNum=2, regionLocation=2d46b487c067,39027,1733575714856 2024-12-07T12:48:49,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 because future has completed 2024-12-07T12:48:49,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:48:49,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 in 197 msec 2024-12-07T12:48:49,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:48:49,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN in 365 msec 2024-12-07T12:48:49,165 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:48:49,165 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733575729165"}]},"ts":"1733575729165"} 2024-12-07T12:48:49,167 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-07T12:48:49,169 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:48:49,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 429 msec 2024-12-07T12:48:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:48:49,382 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-07T12:48:49,382 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-12-07T12:48:49,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:48:49,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-07T12:48:49,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:48:49,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-07T12:48:49,405 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=2] 2024-12-07T12:48:49,407 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:48:49,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:48:49,422 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=05976781667afccaf4cfd2929edf2476, source=2d46b487c067,39027,1733575714856, destination=2d46b487c067,39787,1733575714772, warming up region on 2d46b487c067,39787,1733575714772 2024-12-07T12:48:49,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:48:49,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=05976781667afccaf4cfd2929edf2476, source=2d46b487c067,39027,1733575714856, destination=2d46b487c067,39787,1733575714772, running balancer 2024-12-07T12:48:49,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54613, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:48:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE 2024-12-07T12:48:49,426 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE 2024-12-07T12:48:49,429 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=CLOSING, regionLocation=2d46b487c067,39027,1733575714856 2024-12-07T12:48:49,430 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(7855): Warmup {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:49,431 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,432 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:49,432 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE because future has completed 2024-12-07T12:48:49,433 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,433 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,433 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T12:48:49,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856}] 2024-12-07T12:48:49,434 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:49,434 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,434 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:49,435 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,435 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
2024-12-07T12:48:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575729435Disabling compacts and flushes for region at 1733575729435Disabling writes for close at 1733575729435Writing region close event to WAL at 1733575729435Closed at 1733575729435 2024-12-07T12:48:49,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-07T12:48:49,593 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,594 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-07T12:48:49,595 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:49,595 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,595 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,595 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:49,595 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
2024-12-07T12:48:49,595 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 05976781667afccaf4cfd2929edf2476 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-07T12:48:49,622 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/c820feae5a94453c97ada0cd81ef3d6b is 35, key is r1/cf1:q/1733575729410/Put/seqid=0 2024-12-07T12:48:49,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741887_1065 (size=4783) 2024-12-07T12:48:49,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741887_1065 (size=4783) 2024-12-07T12:48:49,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741887_1065 (size=4783) 2024-12-07T12:48:49,629 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/c820feae5a94453c97ada0cd81ef3d6b 2024-12-07T12:48:49,637 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/c820feae5a94453c97ada0cd81ef3d6b as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b 2024-12-07T12:48:49,644 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T12:48:49,646 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 05976781667afccaf4cfd2929edf2476 in 50ms, sequenceid=5, compaction requested=false 2024-12-07T12:48:49,646 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-07T12:48:49,652 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-07T12:48:49,654 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,654 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575729595Running coprocessor pre-close hooks at 1733575729595Disabling compacts and flushes for region at 1733575729595Disabling writes for close at 1733575729595Obtaining lock to block concurrent updates at 1733575729595Preparing flush snapshotting stores in 05976781667afccaf4cfd2929edf2476 at 1733575729595Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733575729596 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. at 1733575729597 (+1 ms)Flushing 05976781667afccaf4cfd2929edf2476/cf1: creating writer at 1733575729598 (+1 ms)Flushing 05976781667afccaf4cfd2929edf2476/cf1: appending metadata at 1733575729622 (+24 ms)Flushing 05976781667afccaf4cfd2929edf2476/cf1: closing flushed file at 1733575729622Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@191d0b2a: reopening flushed file at 1733575729636 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 05976781667afccaf4cfd2929edf2476 in 50ms, sequenceid=5, compaction requested=false at 1733575729646 (+10 ms)Writing region close event to WAL at 1733575729647 (+1 ms)Running coprocessor post-close hooks at 1733575729652 (+5 ms)Closed at 1733575729654 (+2 ms) 2024-12-07T12:48:49,655 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 05976781667afccaf4cfd2929edf2476 move to 2d46b487c067,39787,1733575714772 record at close sequenceid=5 2024-12-07T12:48:49,658 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,659 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=CLOSED 2024-12-07T12:48:49,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 because future has completed 2024-12-07T12:48:49,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T12:48:49,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 in 230 msec 2024-12-07T12:48:49,666 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE; 
state=CLOSED, location=2d46b487c067,39787,1733575714772; forceNewPlan=false, retain=false 2024-12-07T12:48:49,817 INFO [2d46b487c067:37233 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T12:48:49,817 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPENING, regionLocation=2d46b487c067,39787,1733575714772 2024-12-07T12:48:49,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE because future has completed 2024-12-07T12:48:49,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772}] 2024-12-07T12:48:49,983 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:49,983 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:49,983 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,984 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:49,984 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,984 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,987 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,989 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:49,989 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,997 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b 2024-12-07T12:48:49,997 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,998 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:49,999 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:49,999 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:49,999 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:49,999 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,000 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,002 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,003 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,003 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,004 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-07T12:48:50,006 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,007 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 05976781667afccaf4cfd2929edf2476; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73650974, jitterRate=0.09748503565788269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T12:48:50,007 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,008 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 05976781667afccaf4cfd2929edf2476: Running coprocessor pre-open hook at 1733575729984Writing region info on filesystem at 1733575729984Initializing all the Stores at 1733575729986 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575729986Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575729987 (+1 ms)Cleaning up temporary data from old regions at 1733575730003 (+16 ms)Running coprocessor post-open hooks at 1733575730007 (+4 ms)Region opened successfully at 1733575730007 2024-12-07T12:48:50,009 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., pid=9, masterSystemTime=1733575729976 2024-12-07T12:48:50,011 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,012 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,012 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPEN, openSeqNum=9, regionLocation=2d46b487c067,39787,1733575714772 2024-12-07T12:48:50,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772 because future has completed 2024-12-07T12:48:50,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-07T12:48:50,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772 in 195 msec 2024-12-07T12:48:50,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE in 594 msec 2024-12-07T12:48:50,033 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:48:50,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:48:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:50070 deadline: 1733575790039, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39787 startCode=1733575714772. As of locationSeqNum=5. 2024-12-07T12:48:50,069 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39787 startCode=1733575714772. As of locationSeqNum=5. 2024-12-07T12:48:50,070 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39787 startCode=1733575714772. As of locationSeqNum=5. 
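The RegionMovedException entries here show the async client replacing its stale cached location (seqNum=2, port 39027) with the one carried in the exception (seqNum=5, port 39787). A minimal sketch of forcing the same refresh through the public client API, assuming an HBase 2.x+ `Connection` named `conn`; the helper method name `relocate` is illustrative and not part of the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Re-resolve a region location after a RegionMovedException, bypassing the client cache.
    static HRegionLocation relocate(Connection conn, String tableName, byte[] row) throws IOException {
      try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(tableName))) {
        // reload=true ignores the cached entry and re-reads hbase:meta,
        // which is the effect the AsyncRegionLocatorHelper achieves internally above.
        return locator.getRegionLocation(row, true);
      }
    }

In practice the retrying client does this on its own; a helper like this is only useful from test code to check where a row currently lives (its ServerName and seqNum).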
2024-12-07T12:48:50,070 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39787,1733575714772, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39787 startCode=1733575714772. As of locationSeqNum=5. 2024-12-07T12:48:50,180 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:48:50,182 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:48:50,193 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 05976781667afccaf4cfd2929edf2476 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-07T12:48:50,213 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/d646a45513534a849a905c34af3b0e63 is 29, key is r1/cf1:/1733575730183/DeleteFamily/seqid=0 2024-12-07T12:48:50,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741888_1066 (size=4906) 2024-12-07T12:48:50,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741888_1066 (size=4906) 2024-12-07T12:48:50,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741888_1066 (size=4906) 2024-12-07T12:48:50,221 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,227 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,240 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf2/212bdbe6a9ed42379b1ef57f391f4293 is 29, key is r1/cf2:/1733575730183/DeleteFamily/seqid=0 2024-12-07T12:48:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741889_1067 (size=4906) 2024-12-07T12:48:50,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741889_1067 (size=4906) 2024-12-07T12:48:50,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741889_1067 (size=4906) 
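At this point the test thread flushes both column families of region 05976781667afccaf4cfd2929edf2476 (dataSize=50 B), writing one HFile per family under .tmp before committing it. A hedged sketch of requesting such a flush through the standard Admin API, assuming an HBase 2.x+ `Connection` named `conn` (the helper name is illustrative; the test itself drives the flush directly on the region):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Ask the region servers to flush the table's memstores; each family holding data
    // becomes a new HFile, as seen for cf1 and cf2 in the entries above.
    static void flushTable(Connection conn, String tableName) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        admin.flush(TableName.valueOf(tableName));
      }
    }

For this log the call would be flushTable(conn, "testReplayEditsAfterRegionMovedWithMultiCF").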
2024-12-07T12:48:50,247 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf2/212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,253 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,254 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/d646a45513534a849a905c34af3b0e63 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,261 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,261 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63, entries=1, sequenceid=12, filesize=4.8 K 2024-12-07T12:48:50,262 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf2/212bdbe6a9ed42379b1ef57f391f4293 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,270 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,270 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293, entries=1, sequenceid=12, filesize=4.8 K 2024-12-07T12:48:50,271 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 05976781667afccaf4cfd2929edf2476 in 78ms, sequenceid=12, compaction requested=false 2024-12-07T12:48:50,271 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 05976781667afccaf4cfd2929edf2476: 2024-12-07T12:48:50,274 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T12:48:50,276 DEBUG [Time-limited test {}] regionserver.HStore(1541): 05976781667afccaf4cfd2929edf2476/cf1 is initiating major compaction (all files) 2024-12-07T12:48:50,276 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:48:50,276 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:50,277 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 05976781667afccaf4cfd2929edf2476/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,277 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b, hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63] into tmpdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp, totalSize=9.5 K 2024-12-07T12:48:50,279 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c820feae5a94453c97ada0cd81ef3d6b, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733575729410 2024-12-07T12:48:50,280 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d646a45513534a849a905c34af3b0e63, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-07T12:48:50,291 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 05976781667afccaf4cfd2929edf2476#cf1#compaction#16 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:48:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741890_1068 (size=4626) 2024-12-07T12:48:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741890_1068 (size=4626) 2024-12-07T12:48:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741890_1068 (size=4626) 2024-12-07T12:48:50,306 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf1/6b7bca0a87834eb2ba7ae7598ca1f2ef as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/6b7bca0a87834eb2ba7ae7598ca1f2ef 2024-12-07T12:48:50,319 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 05976781667afccaf4cfd2929edf2476/cf1 of 05976781667afccaf4cfd2929edf2476 into 6b7bca0a87834eb2ba7ae7598ca1f2ef(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
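The cf1 store is then major-compacted: its two store files are rewritten into a single 4.5 K file, and the same happens to cf2 just below. A minimal sketch of requesting a per-family major compaction via Admin, again assuming an HBase 2.x+ `Connection` named `conn` (illustrative helper, not the test's own code path):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.util.Bytes;

    // Queue a major compaction of one column family; all eligible store files are
    // rewritten into a single file. The call returns once the request is queued;
    // completion shows up in the region server log, as above.
    static void majorCompactFamily(Connection conn, String tableName, String family) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        admin.majorCompact(TableName.valueOf(tableName), Bytes.toBytes(family));
      }
    }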
2024-12-07T12:48:50,319 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 05976781667afccaf4cfd2929edf2476: 2024-12-07T12:48:50,319 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-07T12:48:50,320 DEBUG [Time-limited test {}] regionserver.HStore(1541): 05976781667afccaf4cfd2929edf2476/cf2 is initiating major compaction (all files) 2024-12-07T12:48:50,320 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:48:50,320 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:48:50,320 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 05976781667afccaf4cfd2929edf2476/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,320 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293] into tmpdir=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp, totalSize=4.8 K 2024-12-07T12:48:50,321 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 212bdbe6a9ed42379b1ef57f391f4293, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-07T12:48:50,327 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 05976781667afccaf4cfd2929edf2476#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:48:50,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741891_1069 (size=4592) 2024-12-07T12:48:50,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741891_1069 (size=4592) 2024-12-07T12:48:50,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741891_1069 (size=4592) 2024-12-07T12:48:50,341 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/.tmp/cf2/2eb796afbb1144de9525f939ed12ed83 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/2eb796afbb1144de9525f939ed12ed83 2024-12-07T12:48:50,348 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 05976781667afccaf4cfd2929edf2476/cf2 of 05976781667afccaf4cfd2929edf2476 into 2eb796afbb1144de9525f939ed12ed83(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:48:50,348 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 05976781667afccaf4cfd2929edf2476: 2024-12-07T12:48:50,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=05976781667afccaf4cfd2929edf2476, source=2d46b487c067,39787,1733575714772, destination=2d46b487c067,39027,1733575714856, warming up region on 2d46b487c067,39027,1733575714856 2024-12-07T12:48:50,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=05976781667afccaf4cfd2929edf2476, source=2d46b487c067,39787,1733575714772, destination=2d46b487c067,39027,1733575714856, running balancer 2024-12-07T12:48:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE 2024-12-07T12:48:50,354 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE 2024-12-07T12:48:50,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,356 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=CLOSING, regionLocation=2d46b487c067,39787,1733575714772 2024-12-07T12:48:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(7855): Warmup {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:50,357 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,359 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:50,359 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:50,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE because future has completed 2024-12-07T12:48:50,360 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T12:48:50,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772}] 2024-12-07T12:48:50,369 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/6b7bca0a87834eb2ba7ae7598ca1f2ef 2024-12-07T12:48:50,374 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b 2024-12-07T12:48:50,380 INFO [StoreFileOpener-05976781667afccaf4cfd2929edf2476-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,380 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,380 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:50,380 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,381 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:50,382 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:50,388 INFO [StoreFileOpener-05976781667afccaf4cfd2929edf2476-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,388 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,394 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/2eb796afbb1144de9525f939ed12ed83 2024-12-07T12:48:50,394 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:50,394 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
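The move itself begins at the HMaster(2410)/(2414) entries above: the master first warms the region up on the destination server and then runs the close/open procedure pair. A hedged sketch of issuing such a move from a client, assuming HBase 2.2 or later (where Admin.move accepts a ServerName) and a `Connection` named `conn`; the helper and its single-region assumption are illustrative only:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Move the table's (single) region to an explicit destination server; the master
    // then performs the warmup and TransitRegionStateProcedure seen in the log.
    static void moveRegion(Connection conn, String tableName, String destServer) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        RegionInfo region = admin.getRegions(TableName.valueOf(tableName)).get(0);
        admin.move(region.getEncodedNameAsBytes(), ServerName.valueOf(destServer));
      }
    }

With the values from this log the destination string would be "2d46b487c067,39027,1733575714856".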
2024-12-07T12:48:50,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39027 {}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575730394Disabling compacts and flushes for region at 1733575730394Disabling writes for close at 1733575730394Writing region close event to WAL at 1733575730395 (+1 ms)Closed at 1733575730395 2024-12-07T12:48:50,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-07T12:48:50,515 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,515 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-07T12:48:50,515 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:50,515 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,515 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,515 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:50,515 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,516 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b, hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63] to archive 2024-12-07T12:48:50,520 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T12:48:50,525 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/c820feae5a94453c97ada0cd81ef3d6b 2024-12-07T12:48:50,527 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63 to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/d646a45513534a849a905c34af3b0e63 2024-12-07T12:48:50,543 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293] to archive 2024-12-07T12:48:50,544 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T12:48:50,547 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293 to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/212bdbe6a9ed42379b1ef57f391f4293 2024-12-07T12:48:50,553 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-07T12:48:50,554 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
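On close, the compacted-away cf1 and cf2 files are moved under the region's archive directory, and a recovered.edits/17.seqid marker records newMaxSeqId=17 for the next open. A small sketch of inspecting such a directory with the Hadoop FileSystem API; the path argument would be one of the archive paths printed above, and the helper name is made up:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // List the files HFileArchiver moved into a per-region archive directory.
    static void listArchivedFiles(Configuration conf, String archiveDir) throws IOException {
      Path dir = new Path(archiveDir);
      FileSystem fs = dir.getFileSystem(conf); // resolves the hdfs:// scheme from the path itself
      for (FileStatus status : fs.listStatus(dir)) {
        System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
      }
    }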
2024-12-07T12:48:50,554 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575730515Running coprocessor pre-close hooks at 1733575730515Disabling compacts and flushes for region at 1733575730515Disabling writes for close at 1733575730515Writing region close event to WAL at 1733575730548 (+33 ms)Running coprocessor post-close hooks at 1733575730554 (+6 ms)Closed at 1733575730554 2024-12-07T12:48:50,554 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 05976781667afccaf4cfd2929edf2476 move to 2d46b487c067,39027,1733575714856 record at close sequenceid=12 2024-12-07T12:48:50,557 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,558 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=CLOSED 2024-12-07T12:48:50,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772 because future has completed 2024-12-07T12:48:50,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-07T12:48:50,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39787,1733575714772 in 202 msec 2024-12-07T12:48:50,566 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE; state=CLOSED, location=2d46b487c067,39027,1733575714856; forceNewPlan=false, retain=false 2024-12-07T12:48:50,717 INFO [2d46b487c067:37233 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T12:48:50,717 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPENING, regionLocation=2d46b487c067,39027,1733575714856 2024-12-07T12:48:50,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE because future has completed 2024-12-07T12:48:50,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856}] 2024-12-07T12:48:50,878 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
2024-12-07T12:48:50,878 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:50,878 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,878 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:50,878 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,878 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,880 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,881 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:50,881 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:50,889 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/6b7bca0a87834eb2ba7ae7598ca1f2ef 2024-12-07T12:48:50,889 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:50,890 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,891 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:50,891 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:50,899 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/2eb796afbb1144de9525f939ed12ed83 2024-12-07T12:48:50,899 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:50,899 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,900 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,902 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,902 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,902 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,903 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
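The FlushLargeStoresPolicy message notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the lower bound falls back to memstore flush size divided by the number of families (64.0 M here). A hedged sketch of setting that property explicitly on the table, assuming an HBase 2.x+ `Connection` named `conn` (helper name and the chosen value are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Store an explicit per-column-family flush lower bound in the table descriptor
    // so FlushLargeStoresPolicy stops falling back to flushSize / numberOfFamilies.
    static void setPerFamilyFlushBound(Connection conn, String tableName, long bytes) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf(tableName);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tn))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", Long.toString(bytes))
            .build();
        admin.modifyTable(updated);
      }
    }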
2024-12-07T12:48:50,905 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,905 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 05976781667afccaf4cfd2929edf2476; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70488797, jitterRate=0.05036492645740509}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-07T12:48:50,905 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,906 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 05976781667afccaf4cfd2929edf2476: Running coprocessor pre-open hook at 1733575730879Writing region info on filesystem at 1733575730879Initializing all the Stores at 1733575730879Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575730880 (+1 ms)Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575730880Cleaning up temporary data from old regions at 1733575730902 (+22 ms)Running coprocessor post-open hooks at 1733575730905 (+3 ms)Region opened successfully at 1733575730906 (+1 ms) 2024-12-07T12:48:50,907 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., pid=12, masterSystemTime=1733575730873 2024-12-07T12:48:50,910 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,910 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
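After this second open the region is back online on 2d46b487c067,39027,1733575714856 with next sequenceid=18, and client reads simply follow the refreshed location. A tiny sketch of reading row r1 back through the normal Table API (the row key and family names come from earlier entries; the helper itself is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Read a row after the move; the client resolves the new region location transparently.
    static Result readRow(Connection conn, String tableName, String row) throws IOException {
      try (Table table = conn.getTable(TableName.valueOf(tableName))) {
        Get get = new Get(Bytes.toBytes(row));
        get.addFamily(Bytes.toBytes("cf1"));
        get.addFamily(Bytes.toBytes("cf2"));
        return table.get(get);
      }
    }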
2024-12-07T12:48:50,910 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPEN, openSeqNum=18, regionLocation=2d46b487c067,39027,1733575714856 2024-12-07T12:48:50,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 because future has completed 2024-12-07T12:48:50,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-07T12:48:50,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,39027,1733575714856 in 193 msec 2024-12-07T12:48:50,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, REOPEN/MOVE in 564 msec 2024-12-07T12:48:50,956 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:48:50,958 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:48:50,960 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 2d46b487c067,39027,1733575714856: testing ***** 2024-12-07T12:48:50,960 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-07T12:48:50,962 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-07T12:48:50,964 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-07T12:48:50,966 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-07T12:48:50,966 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-07T12:48:50,972 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 271590608 }, "NonHeapMemoryUsage": { "committed": 170065920, "init": 7667712, "max": -1, "used": 167479520 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "2d46b487c067", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, 
"numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2068, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 1, "ProcessCallTime_max": 9, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 3, "ProcessCallTime_median": 5, "ProcessCallTime_75th_percentile": 7, "ProcessCallTime_90th_percentile": 8, "ProcessCallTime_95th_percentile": 8, "ProcessCallTime_98th_percentile": 8, "ProcessCallTime_99th_percentile": 8, "ProcessCallTime_99.9th_percentile": 8, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 0, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 9, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 5, "TotalCallTime_75th_percentile": 7, "TotalCallTime_90th_percentile": 8, "TotalCallTime_95th_percentile": 8, "TotalCallTime_98th_percentile": 8, "TotalCallTime_99th_percentile": 8, "TotalCallTime_99.9th_percentile": 8, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, "ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, 
"RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "2d46b487c067", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:62259", "tag.serverName": "2d46b487c067,44445,1733575714899", "tag.clusterId": "b41e2b7d-ff03-4be2-9172-a81b81a933a0", "tag.Context": "regionserver", "tag.Hostname": "2d46b487c067", "regionCount": 1, "storeCount": 4, "hlogFileCount": 2, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 74, "memStoreHeapSize": 1248, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733575714899, "averageRegionSize": 74, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, 
"l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 1.4, "writeRequestRatePerSecond": 0.4, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 199680, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 1, "activeScanners": 0, "totalRequestCount": 7, "totalRowActionRequestCount": 9, "readRequestCount": 7, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 2, "rpcGetRequestCount": 1, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 4, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 2, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 4, "ScanTime_min": 0, "ScanTime_max": 6, "ScanTime_mean": 1, 
"ScanTime_25th_percentile": 1, "ScanTime_median": 3, "ScanTime_75th_percentile": 4, "ScanTime_90th_percentile": 5, "ScanTime_95th_percentile": 5, "ScanTime_98th_percentile": 5, "ScanTime_99th_percentile": 5, "ScanTime_99.9th_percentile": 5, "ScanTime_TimeRangeCount_0-1": 4, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 2, "Put_min": 1, "Put_max": 7, "Put_mean": 4, "Put_25th_percentile": 2, "Put_median": 4, "Put_75th_percentile": 5, "Put_90th_percentile": 6, "Put_95th_percentile": 6, "Put_98th_percentile": 6, "Put_99th_percentile": 6, "Put_99.9th_percentile": 6, "Put_TimeRangeCount_0-1": 2, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, 
"Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, 
"Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 1, "Get_min": 2, "Get_max": 2, "Get_mean": 2, "Get_25th_percentile": 2, "Get_median": 2, "Get_75th_percentile": 2, "Get_90th_percentile": 2, "Get_95th_percentile": 2, "Get_98th_percentile": 2, "Get_99th_percentile": 2, "Get_99.9th_percentile": 2, "Get_TimeRangeCount_0-1": 1, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, 
"MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 4, "ScanSize_min": 0, "ScanSize_max": 144, "ScanSize_mean": 72, "ScanSize_25th_percentile": 36, "ScanSize_median": 72, "ScanSize_75th_percentile": 108, "ScanSize_90th_percentile": 129, "ScanSize_95th_percentile": 136, "ScanSize_98th_percentile": 141, "ScanSize_99th_percentile": 142, "ScanSize_99.9th_percentile": 143, "ScanSize_SizeRangeCount_0-10": 4, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, 
"Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-07T12:48:50,975 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37233 {}] master.MasterRpcServices(700): 2d46b487c067,39027,1733575714856 reported a fatal error: ***** ABORTING region server 2d46b487c067,39027,1733575714856: testing ***** 2024-12-07T12:48:50,977 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2d46b487c067,39027,1733575714856' ***** 2024-12-07T12:48:50,977 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-07T12:48:50,978 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:48:50,978 INFO [RS:1;2d46b487c067:39027 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-07T12:48:50,978 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:48:50,978 INFO [RS:1;2d46b487c067:39027 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-07T12:48:50,979 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(3091): Received CLOSE for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,979 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(956): aborting server 2d46b487c067,39027,1733575714856 2024-12-07T12:48:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39787 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:43664 deadline: 1733575790979, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39027 startCode=1733575714856. As of locationSeqNum=12. 2024-12-07T12:48:50,979 INFO [RS:1;2d46b487c067:39027 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:48:50,979 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:48:50,979 INFO [RS:1;2d46b487c067:39027 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2d46b487c067:39027. 2024-12-07T12:48:50,979 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,979 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39787,1733575714772, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39787,1733575714772, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39027 startCode=1733575714856. As of locationSeqNum=12. 
2024-12-07T12:48:50,979 DEBUG [RS:1;2d46b487c067:39027 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:48:50,979 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,979 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39787,1733575714772, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39027 startCode=1733575714856. As of locationSeqNum=12. 2024-12-07T12:48:50,979 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. after waiting 0 ms 2024-12-07T12:48:50,980 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39787,1733575714772, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2d46b487c067 port=39027 startCode=1733575714856. As of locationSeqNum=12. 2024-12-07T12:48:50,980 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
2024-12-07T12:48:50,980 DEBUG [RS:1;2d46b487c067:39027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:48:50,980 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T12:48:50,980 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1325): Online Regions={05976781667afccaf4cfd2929edf2476=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.} 2024-12-07T12:48:50,981 DEBUG [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1351): Waiting on 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:50,982 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:50,982 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575730979Running coprocessor pre-close hooks at 1733575730979Disabling compacts and flushes for region at 1733575730979Disabling writes for close at 1733575730980 (+1 ms)Writing region close event to WAL at 1733575730982 (+2 ms)Running coprocessor post-close hooks at 1733575730982Closed at 1733575730982 2024-12-07T12:48:50,982 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:51,030 INFO [regionserver/2d46b487c067:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:48:51,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2d46b487c067,39027,1733575714856 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:48:51,092 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2d46b487c067,39027,1733575714856 aborting 2024-12-07T12:48:51,092 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2d46b487c067,39027,1733575714856 aborting 2024-12-07T12:48:51,092 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=12 from cache 2024-12-07T12:48:51,181 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(976): stopping server 2d46b487c067,39027,1733575714856; all regions closed. 2024-12-07T12:48:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741834_1010 (size=2142) 2024-12-07T12:48:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741834_1010 (size=2142) 2024-12-07T12:48:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741834_1010 (size=2142) 2024-12-07T12:48:51,189 DEBUG [RS:1;2d46b487c067:39027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:48:51,189 INFO [RS:1;2d46b487c067:39027 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:48:51,189 INFO [RS:1;2d46b487c067:39027 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:48:51,189 INFO [RS:1;2d46b487c067:39027 {}] hbase.ChoreService(370): Chore service for: regionserver/2d46b487c067:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:48:51,189 INFO [regionserver/2d46b487c067:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:48:51,190 INFO [RS:1;2d46b487c067:39027 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:48:51,190 INFO [RS:1;2d46b487c067:39027 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:48:51,190 INFO [RS:1;2d46b487c067:39027 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T12:48:51,190 INFO [RS:1;2d46b487c067:39027 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:48:51,190 INFO [RS:1;2d46b487c067:39027 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39027 2024-12-07T12:48:51,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:48:51,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2d46b487c067,39027,1733575714856 2024-12-07T12:48:51,195 INFO [RS:1;2d46b487c067:39027 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:48:51,196 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2d46b487c067,39027,1733575714856] 2024-12-07T12:48:51,196 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2d46b487c067,39027,1733575714856 already deleted, retry=false 2024-12-07T12:48:51,197 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 2d46b487c067,39027,1733575714856 on 2d46b487c067,37233,1733575714217 2024-12-07T12:48:51,201 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 2d46b487c067,39027,1733575714856, splitWal=true, meta=false 2024-12-07T12:48:51,204 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for 2d46b487c067,39027,1733575714856 (carryingMeta=false) 2d46b487c067,39027,1733575714856/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@11719d65[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-12-07T12:48:51,204 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 2d46b487c067,39027,1733575714856, splitWal=true, meta=false 2024-12-07T12:48:51,205 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(207): 2d46b487c067,39027,1733575714856 had 1 regions 2024-12-07T12:48:51,207 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 2d46b487c067,39027,1733575714856, splitWal=true, meta=false, isMeta: false 2024-12-07T12:48:51,208 DEBUG [PEWorker-5 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting 2024-12-07T12:48:51,209 INFO [PEWorker-5 {}] master.SplitWALManager(105): 2d46b487c067,39027,1733575714856 WAL count=1, meta=false 2024-12-07T12:48:51,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 2d46b487c067%2C39027%2C1733575714856.1733575716065}] 2024-12-07T12:48:51,216 DEBUG [PEWorker-1 {}] master.SplitWALManager(158): Acquired split WAL worker=2d46b487c067,44445,1733575714899 2024-12-07T12:48:51,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 2d46b487c067%2C39027%2C1733575714856.1733575716065, worker=2d46b487c067,44445,1733575714899}] 2024-12-07T12:48:51,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:48:51,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39027-0x100b4b97f370002, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:48:51,297 INFO [RS:1;2d46b487c067:39027 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:48:51,297 INFO [RS:1;2d46b487c067:39027 {}] regionserver.HRegionServer(1031): Exiting; stopping=2d46b487c067,39027,1733575714856; zookeeper connection closed. 2024-12-07T12:48:51,298 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2b6f4587 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2b6f4587 2024-12-07T12:48:51,303 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18] 2024-12-07T12:48:51,305 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 2d46b487c067:39027 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2d46b487c067/172.17.0.2:39027 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:51,305 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18, error=java.net.ConnectException: Call to address=2d46b487c067:39027 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2d46b487c067/172.17.0.2:39027 2024-12-07T12:48:51,305 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 is java.net.ConnectException: Connection refused 2024-12-07T12:48:51,305 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 from cache 2024-12-07T12:48:51,306 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address 2d46b487c067:39027 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2d46b487c067/172.17.0.2:39027 2024-12-07T12:48:51,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44445 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-07T12:48:51,403 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065, size=2.1 K (2142bytes) 
2024-12-07T12:48:51,403 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 2024-12-07T12:48:51,403 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 after 0ms 2024-12-07T12:48:51,407 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 took 4ms 2024-12-07T12:48:51,414 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 05976781667afccaf4cfd2929edf2476: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-07T12:48:51,415 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 so closing down 2024-12-07T12:48:51,415 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:51,415 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:51,415 INFO [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 8 ms; skipped=6; WAL=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065, size=2.1 K, length=2142, corrupted=false, cancelled=false 2024-12-07T12:48:51,415 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065, journal: Splitting hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065, size=2.1 K (2142bytes) at 1733575731403Finishing writing output for hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 so closing down at 1733575731415 (+12 ms)3 split writer threads finished at 1733575731415Processed 6 edits across 0 Regions in 8 ms; skipped=6; 
WAL=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065, size=2.1 K, length=2142, corrupted=false, cancelled=false at 1733575731415 2024-12-07T12:48:51,415 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 2024-12-07T12:48:51,417 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-07T12:48:51,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-07T12:48:51,424 INFO [PEWorker-3 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting/2d46b487c067%2C39027%2C1733575714856.1733575716065 to hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs 2024-12-07T12:48:51,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-07T12:48:51,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 2d46b487c067%2C39027%2C1733575714856.1733575716065, worker=2d46b487c067,44445,1733575714899 in 206 msec 2024-12-07T12:48:51,428 DEBUG [PEWorker-4 {}] master.SplitWALManager(172): Release split WAL worker=2d46b487c067,44445,1733575714899 2024-12-07T12:48:51,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-07T12:48:51,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 2d46b487c067%2C39027%2C1733575714856.1733575716065, worker=2d46b487c067,44445,1733575714899 in 218 msec 2024-12-07T12:48:51,434 INFO [PEWorker-5 {}] master.SplitLogManager(171): hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting dir is empty, no logs to split. 2024-12-07T12:48:51,434 INFO [PEWorker-5 {}] master.SplitWALManager(105): 2d46b487c067,39027,1733575714856 WAL count=0, meta=false 2024-12-07T12:48:51,434 DEBUG [PEWorker-5 {}] procedure.ServerCrashProcedure(329): Check if 2d46b487c067,39027,1733575714856 WAL splitting is done? wals=0, meta=false 2024-12-07T12:48:51,436 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for 2d46b487c067,39027,1733575714856 failed, ignore...File hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/WALs/2d46b487c067,39027,1733575714856-splitting does not exist. 
2024-12-07T12:48:51,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN}] 2024-12-07T12:48:51,440 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN 2024-12-07T12:48:51,441 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-07T12:48:51,592 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(204): Hosts are {2d46b487c067=0} racks are {/default-rack=0} 2024-12-07T12:48:51,592 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T12:48:51,592 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T12:48:51,592 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T12:48:51,592 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T12:48:51,593 INFO [2d46b487c067:37233 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T12:48:51,593 INFO [2d46b487c067:37233 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T12:48:51,593 DEBUG [2d46b487c067:37233 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T12:48:51,594 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPENING, regionLocation=2d46b487c067,44445,1733575714899 2024-12-07T12:48:51,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN because future has completed 2024-12-07T12:48:51,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,44445,1733575714899}] 2024-12-07T12:48:51,612 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18] 2024-12-07T12:48:51,613 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to 2d46b487c067:39027 this server is in the failed servers list 2024-12-07T12:48:51,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=2d46b487c067:39027 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2d46b487c067:39027 2024-12-07T12:48:51,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2d46b487c067:39027 2024-12-07T12:48:51,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,39027,1733575714856, seqNum=18 from cache 2024-12-07T12:48:51,767 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:48:51,768 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 05976781667afccaf4cfd2929edf2476, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:51,769 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:51,769 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:51,769 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:51,769 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:51,771 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:51,771 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf1 2024-12-07T12:48:51,772 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:51,778 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf1/6b7bca0a87834eb2ba7ae7598ca1f2ef 2024-12-07T12:48:51,779 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:51,779 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 05976781667afccaf4cfd2929edf2476 2024-12-07T12:48:51,780 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05976781667afccaf4cfd2929edf2476 columnFamilyName cf2 2024-12-07T12:48:51,780 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:51,787 DEBUG [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/cf2/2eb796afbb1144de9525f939ed12ed83 2024-12-07T12:48:51,787 INFO [StoreOpener-05976781667afccaf4cfd2929edf2476-1 {}] regionserver.HStore(327): Store=05976781667afccaf4cfd2929edf2476/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:51,787 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 
05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,788 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,790 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,791 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,791 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,791 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead.
2024-12-07T12:48:51,793 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 05976781667afccaf4cfd2929edf2476
2024-12-07T12:48:51,794 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 05976781667afccaf4cfd2929edf2476; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71096558, jitterRate=0.059421271085739136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864}
2024-12-07T12:48:51,794 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 05976781667afccaf4cfd2929edf2476
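The split-policy and flush-policy figures in the "Opened 05976781667afccaf4cfd2929edf2476" and FlushLargeStoresPolicy entries just above fit together arithmetically. The following back-of-the-envelope check is not code from the test; it assumes the default 128 MB memstore flush size and the 64 MB base split size that the logged numbers imply.

    public class SplitAndFlushFigures {
      public static void main(String[] args) {
        // ConstantSizeRegionSplitPolicy prints desiredMaxFileSize=71096558 with
        // jitterRate=0.059421271085739136; dividing the jitter back out recovers
        // the base split size, roughly 67108864 bytes, i.e. 64 MB.
        double jitterRate = 0.059421271085739136;
        System.out.println(71096558L / (1 + jitterRate));   // ~6.7108864E7

        // FlushLargeStoresPolicy falls back to memstore-flush-size / #families when
        // hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. With the
        // default 128 MB flush size and two families (cf1, cf2) that gives the
        // logged "(64.0 M)" / flushSizeLowerBound=67108864, and the
        // IncreasingToUpperBoundRegionSplitPolicy initialSize=268435456 matches
        // twice the flush size.
        long memstoreFlushSize = 134217728L;                 // 128 MB
        System.out.println(memstoreFlushSize / 2);           // 67108864
        System.out.println(2 * memstoreFlushSize);           // 268435456
      }
    }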
2024-12-07T12:48:51,795 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 05976781667afccaf4cfd2929edf2476: Running coprocessor pre-open hook at 1733575731769Writing region info on filesystem at 1733575731769Initializing all the Stores at 1733575731770 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575731770Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575731770Cleaning up temporary data from old regions at 1733575731791 (+21 ms)Running coprocessor post-open hooks at 1733575731794 (+3 ms)Region opened successfully at 1733575731795 (+1 ms)
2024-12-07T12:48:51,796 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., pid=17, masterSystemTime=1733575731757
2024-12-07T12:48:51,798 DEBUG [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.
2024-12-07T12:48:51,799 INFO [RS_OPEN_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476.
2024-12-07T12:48:51,799 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=05976781667afccaf4cfd2929edf2476, regionState=OPEN, openSeqNum=18, regionLocation=2d46b487c067,44445,1733575714899
2024-12-07T12:48:51,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,44445,1733575714899 because future has completed
2024-12-07T12:48:51,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16
2024-12-07T12:48:51,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 05976781667afccaf4cfd2929edf2476, server=2d46b487c067,44445,1733575714899 in 199 msec
2024-12-07T12:48:51,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13
2024-12-07T12:48:51,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=05976781667afccaf4cfd2929edf2476, ASSIGN in 367 msec
2024-12-07T12:48:51,808 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server 2d46b487c067,39027,1733575714856 after splitting done
2024-12-07T12:48:51,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 2d46b487c067,39027,1733575714856, splitWal=true, meta=false in 611 msec
2024-12-07T12:48:52,133 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., hostname=2d46b487c067,44445,1733575714899, seqNum=18]
2024-12-07T12:48:52,151 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterRegionMovedWithMultiCF Thread=411 (was 409)
Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_LOG_REPLAY_OPS-regionserver/2d46b487c067:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2d46b487c067:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2093183973_22 at /127.0.0.1:39724 [Waiting for operation #25] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-662330648_22 at /127.0.0.1:50644 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2d46b487c067:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-662330648_22 at /127.0.0.1:42792 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1050 (was 1022) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=214 (was 198) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5635 (was 5684) 2024-12-07T12:48:52,153 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1050 is superior to 1024 2024-12-07T12:48:52,165 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterPartialFlush Thread=411, OpenFileDescriptor=1050, MaxFileDescriptor=1048576, SystemLoadAverage=214, ProcessCount=11, AvailableMemoryMB=5633 2024-12-07T12:48:52,165 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1050 is superior to 1024 2024-12-07T12:48:52,181 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:52,183 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:52,183 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:52,186 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-88609547, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-88609547, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:52,197 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-88609547/hregion-88609547.1733575732186, exclude list is [], retry=0 2024-12-07T12:48:52,200 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:52,200 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:52,200 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:52,206 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-88609547/hregion-88609547.1733575732186 2024-12-07T12:48:52,206 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:52,206 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 4b62931c4de2e5a4438531cb8c6ba5d0, NAME => 'testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741893_1071 (size=67) 2024-12-07T12:48:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741893_1071 (size=67) 2024-12-07T12:48:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741893_1071 (size=67) 2024-12-07T12:48:52,215 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:52,216 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,217 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName a 2024-12-07T12:48:52,217 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,218 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,218 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,219 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName b 2024-12-07T12:48:52,219 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,220 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,220 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,222 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName c 2024-12-07T12:48:52,222 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,223 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,223 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,224 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,224 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,225 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,225 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,226 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:52,227 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,229 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:52,229 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4b62931c4de2e5a4438531cb8c6ba5d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71508759, jitterRate=0.06556354463100433}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4b62931c4de2e5a4438531cb8c6ba5d0: Writing region info on filesystem at 1733575732215Initializing all the Stores at 1733575732216 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732216Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732216Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732216Cleaning up temporary data from old regions at 1733575732225 (+9 ms)Region opened successfully at 1733575732230 (+5 ms) 2024-12-07T12:48:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4b62931c4de2e5a4438531cb8c6ba5d0, disabling compactions & flushes 2024-12-07T12:48:52,230 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 
after waiting 0 ms 2024-12-07T12:48:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,231 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,231 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4b62931c4de2e5a4438531cb8c6ba5d0: Waiting for close lock at 1733575732230Disabling compacts and flushes for region at 1733575732230Disabling writes for close at 1733575732230Writing region close event to WAL at 1733575732231 (+1 ms)Closed at 1733575732231 2024-12-07T12:48:52,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741892_1070 (size=93) 2024-12-07T12:48:52,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741892_1070 (size=93) 2024-12-07T12:48:52,235 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:52,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741892_1070 (size=93) 2024-12-07T12:48:52,235 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-88609547:(num 1733575732186) 2024-12-07T12:48:52,235 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:52,237 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:52,249 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, exclude list is [], retry=0 2024-12-07T12:48:52,252 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:52,252 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:52,253 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:52,254 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 2024-12-07T12:48:52,255 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:48:52,255 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 4b62931c4de2e5a4438531cb8c6ba5d0, NAME => 'testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:52,255 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:52,255 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,255 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,256 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,257 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName a 2024-12-07T12:48:52,257 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,257 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,258 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,258 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName b 2024-12-07T12:48:52,258 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,259 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,259 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,260 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName c 2024-12-07T12:48:52,260 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,260 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,260 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,261 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,262 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,263 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,263 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,264 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
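The FlushLargeStoresPolicy line above derives the per-family flush lower bound from the region memstore flush size divided by the number of column families, and the "Opened ..." entry that follows reports the resulting flushSizeLowerBound=44739242. The sketch below is illustrative only, assuming the default 128 MB memstore flush size and the three families a, b and c of this table; it is not the HBase code, it merely reproduces that arithmetic:

// Illustrative sketch: fallback arithmetic used when
// hbase.hregion.percolumnfamilyflush.size.lower.bound is not set (assumption: 128 MB flush size).
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes, assumed default
        int familyCount = 3;                         // column families a, b and c
        long flushSizeLowerBound = memstoreFlushSize / familyCount;
        // Prints 44739242 (about 42.7 MB), matching flushSizeLowerBound=44739242 and
        // the "(42.7 M)" figure in the log line above.
        System.out.println(flushSizeLowerBound);
    }
}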
2024-12-07T12:48:52,266 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,267 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4b62931c4de2e5a4438531cb8c6ba5d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65129245, jitterRate=-0.029498621821403503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:52,268 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4b62931c4de2e5a4438531cb8c6ba5d0: Writing region info on filesystem at 1733575732255Initializing all the Stores at 1733575732256 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732256Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732256Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732256Cleaning up temporary data from old regions at 1733575732264 (+8 ms)Region opened successfully at 1733575732268 (+4 ms) 2024-12-07T12:48:52,299 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4b62931c4de2e5a4438531cb8c6ba5d0 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-07T12:48:52,316 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/a/2db2f345c01141c1a8e073577c8a3294 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733575732269/Put/seqid=0 2024-12-07T12:48:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741895_1073 (size=5958) 2024-12-07T12:48:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741895_1073 (size=5958) 2024-12-07T12:48:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741895_1073 (size=5958) 2024-12-07T12:48:52,328 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/a/2db2f345c01141c1a8e073577c8a3294 2024-12-07T12:48:52,354 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/5819d0aebcf64257bb5428bca5cb163e is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733575732279/Put/seqid=0 2024-12-07T12:48:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741896_1074 (size=5958) 2024-12-07T12:48:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741896_1074 (size=5958) 2024-12-07T12:48:52,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741896_1074 (size=5958) 2024-12-07T12:48:52,361 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/5819d0aebcf64257bb5428bca5cb163e 2024-12-07T12:48:52,380 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/c/fced8a5a47384dcda6b4860491c63da4 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733575732288/Put/seqid=0 2024-12-07T12:48:52,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741897_1075 (size=5958) 2024-12-07T12:48:52,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741897_1075 (size=5958) 2024-12-07T12:48:52,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741897_1075 (size=5958) 2024-12-07T12:48:52,387 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/c/fced8a5a47384dcda6b4860491c63da4 2024-12-07T12:48:52,393 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/a/2db2f345c01141c1a8e073577c8a3294 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/a/2db2f345c01141c1a8e073577c8a3294 2024-12-07T12:48:52,399 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/a/2db2f345c01141c1a8e073577c8a3294, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T12:48:52,400 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/5819d0aebcf64257bb5428bca5cb163e as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/5819d0aebcf64257bb5428bca5cb163e 2024-12-07T12:48:52,407 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/5819d0aebcf64257bb5428bca5cb163e, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T12:48:52,408 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/c/fced8a5a47384dcda6b4860491c63da4 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/c/fced8a5a47384dcda6b4860491c63da4 2024-12-07T12:48:52,415 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/c/fced8a5a47384dcda6b4860491c63da4, entries=10, sequenceid=33, filesize=5.8 K 2024-12-07T12:48:52,416 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 4b62931c4de2e5a4438531cb8c6ba5d0 in 117ms, sequenceid=33, compaction requested=false 2024-12-07T12:48:52,417 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4b62931c4de2e5a4438531cb8c6ba5d0: 2024-12-07T12:48:52,417 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4b62931c4de2e5a4438531cb8c6ba5d0, disabling compactions & flushes 2024-12-07T12:48:52,417 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,417 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,417 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. after waiting 0 ms 2024-12-07T12:48:52,417 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 2024-12-07T12:48:52,418 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 
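The "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines above show the flush commit pattern: each new store file is first written in full under the region's .tmp directory and only then moved into the live family directory, so readers never see a partially written file. The snippet below is a generic illustration of that write-then-rename pattern using plain java.nio.file on hypothetical local paths; it is not the HBase implementation:

// Generic write-to-temp-then-rename sketch (hypothetical local paths, not HBase code).
import java.io.IOException;
import java.nio.file.*;

public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Path tmpDir = Paths.get("/tmp/region/.tmp/a");   // staging area for the flush
        Path storeDir = Paths.get("/tmp/region/a");      // live store directory
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);

        // 1. The flusher writes the complete file under .tmp first.
        Path tmpFile = tmpDir.resolve("2db2f345c01141c1a8e073577c8a3294");
        Files.write(tmpFile, new byte[] {1, 2, 3});

        // 2. "Committing ... as ..." corresponds to moving it into the store directory.
        Files.move(tmpFile, storeDir.resolve(tmpFile.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);
    }
}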
2024-12-07T12:48:52,418 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4b62931c4de2e5a4438531cb8c6ba5d0: Waiting for close lock at 1733575732417Disabling compacts and flushes for region at 1733575732417Disabling writes for close at 1733575732417Writing region close event to WAL at 1733575732418 (+1 ms)Closed at 1733575732418 2024-12-07T12:48:52,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741894_1072 (size=5937) 2024-12-07T12:48:52,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741894_1072 (size=5937) 2024-12-07T12:48:52,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741894_1072 (size=5937) 2024-12-07T12:48:52,430 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/5819d0aebcf64257bb5428bca5cb163e to hdfs://localhost:43841/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/5819d0aebcf64257bb5428bca5cb163e 2024-12-07T12:48:52,452 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, size=5.8 K (5937bytes) 2024-12-07T12:48:52,452 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 2024-12-07T12:48:52,452 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 after 0ms 2024-12-07T12:48:52,455 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 took 4ms 2024-12-07T12:48:52,457 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 so closing down 2024-12-07T12:48:52,457 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:48:52,458 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733575732237.temp 2024-12-07T12:48:52,460 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp 2024-12-07T12:48:52,460 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:48:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741898_1076 (size=5223) 2024-12-07T12:48:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741898_1076 (size=5223) 2024-12-07T12:48:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35169 is added to blk_1073741898_1076 (size=5223) 2024-12-07T12:48:52,468 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-07T12:48:52,469 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 2024-12-07T12:48:52,469 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 14 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, size=5.8 K, length=5937, corrupted=false, cancelled=false 2024-12-07T12:48:52,470 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, journal: Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, size=5.8 K (5937bytes) at 1733575732452Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 so closing down at 1733575732457 (+5 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp at 1733575732460 (+3 ms)3 split writer threads finished at 1733575732460Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733575732468 (+8 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000003-wal.1733575732237.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 at 1733575732469 (+1 ms)Processed 32 edits across 1 Regions in 14 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237, size=5.8 K, length=5937, corrupted=false, cancelled=false at 1733575732470 (+1 ms) 2024-12-07T12:48:52,471 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732237 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575732237 2024-12-07T12:48:52,472 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 2024-12-07T12:48:52,472 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:52,474 INFO [Time-limited test {}] 
wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:52,492 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732475, exclude list is [], retry=0 2024-12-07T12:48:52,495 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:52,496 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:52,496 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:52,498 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732475 2024-12-07T12:48:52,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:52,498 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 4b62931c4de2e5a4438531cb8c6ba5d0, NAME => 'testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:52,498 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:52,498 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,498 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,501 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,502 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName a 2024-12-07T12:48:52,502 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,507 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/a/2db2f345c01141c1a8e073577c8a3294 2024-12-07T12:48:52,507 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,508 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,509 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName b 2024-12-07T12:48:52,509 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,510 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,510 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,511 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b62931c4de2e5a4438531cb8c6ba5d0 columnFamilyName c 2024-12-07T12:48:52,511 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,517 DEBUG [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/c/fced8a5a47384dcda6b4860491c63da4 2024-12-07T12:48:52,517 INFO [StoreOpener-4b62931c4de2e5a4438531cb8c6ba5d0-1 {}] regionserver.HStore(327): Store=4b62931c4de2e5a4438531cb8c6ba5d0/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,518 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,518 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,520 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,520 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 2024-12-07T12:48:52,524 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 2024-12-07T12:48:52,524 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4b62931c4de2e5a4438531cb8c6ba5d0 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-07T12:48:52,538 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/7ae00d090bfc403abff5fdec40cc35b4 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733575732279/Put/seqid=0 2024-12-07T12:48:52,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741900_1078 (size=5958) 2024-12-07T12:48:52,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741900_1078 (size=5958) 2024-12-07T12:48:52,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741900_1078 (size=5958) 2024-12-07T12:48:52,546 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), 
to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/7ae00d090bfc403abff5fdec40cc35b4 2024-12-07T12:48:52,552 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/.tmp/b/7ae00d090bfc403abff5fdec40cc35b4 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/7ae00d090bfc403abff5fdec40cc35b4 2024-12-07T12:48:52,559 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/b/7ae00d090bfc403abff5fdec40cc35b4, entries=10, sequenceid=32, filesize=5.8 K 2024-12-07T12:48:52,559 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 4b62931c4de2e5a4438531cb8c6ba5d0 in 35ms, sequenceid=32, compaction requested=false; wal=null 2024-12-07T12:48:52,560 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/0000000000000000032 2024-12-07T12:48:52,561 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,561 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,562 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
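The split above recovered 30 edits (sequence ids 3 through 32), yet the replay reports "Applied 10, skipped 20": families 'a' and 'c' still have the HFiles flushed at sequence id 33, while the file for 'b' was archived beforehand (the HFileArchiver line earlier), so only the edits for 'b' are newer than what is already persisted for their family. The sketch below illustrates that per-family sequence-id filter; it is not HBase's replay code, just the counting logic the numbers above imply, and the per-family interleaving of sequence ids is assumed for illustration:

// Illustrative per-family seqid filter: an edit is re-applied only if its sequence id
// is greater than the max sequence id already persisted for its column family.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ReplaySkipSketch {
    record Edit(String family, long seqId) {}

    public static void main(String[] args) {
        // 'a' and 'c' kept HFiles covering edits up to seqid 33; 'b' lost its flush.
        Map<String, Long> maxFlushedSeqId = Map.of("a", 33L, "b", -1L, "c", 33L);

        // 30 recovered edits, 10 per family, sequence ids 3..32 (exact interleaving assumed).
        List<Edit> recovered = new ArrayList<>();
        long seqId = 3;
        for (String family : new String[] {"a", "b", "c"}) {
            for (int i = 0; i < 10; i++) recovered.add(new Edit(family, seqId++));
        }

        int applied = 0, skipped = 0;
        for (Edit e : recovered) {
            if (e.seqId() > maxFlushedSeqId.get(e.family())) applied++;
            else skipped++;
        }
        System.out.println("applied=" + applied + ", skipped=" + skipped); // 10 and 20
    }
}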
2024-12-07T12:48:52,563 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4b62931c4de2e5a4438531cb8c6ba5d0 2024-12-07T12:48:52,566 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/4b62931c4de2e5a4438531cb8c6ba5d0/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-07T12:48:52,567 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4b62931c4de2e5a4438531cb8c6ba5d0; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64627398, jitterRate=-0.03697672486305237}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:52,568 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4b62931c4de2e5a4438531cb8c6ba5d0: Writing region info on filesystem at 1733575732499Initializing all the Stores at 1733575732499Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732499Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732501 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732501Obtaining lock to block concurrent updates at 1733575732524 (+23 ms)Preparing flush snapshotting stores in 4b62931c4de2e5a4438531cb8c6ba5d0 at 1733575732524Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733575732524Flushing stores of testReplayEditsWrittenViaHRegion,,1733575732182.4b62931c4de2e5a4438531cb8c6ba5d0. 
at 1733575732524Flushing 4b62931c4de2e5a4438531cb8c6ba5d0/b: creating writer at 1733575732524Flushing 4b62931c4de2e5a4438531cb8c6ba5d0/b: appending metadata at 1733575732537 (+13 ms)Flushing 4b62931c4de2e5a4438531cb8c6ba5d0/b: closing flushed file at 1733575732537Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d59f444: reopening flushed file at 1733575732551 (+14 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 4b62931c4de2e5a4438531cb8c6ba5d0 in 35ms, sequenceid=32, compaction requested=false; wal=null at 1733575732559 (+8 ms)Cleaning up temporary data from old regions at 1733575732561 (+2 ms)Region opened successfully at 1733575732568 (+7 ms) 2024-12-07T12:48:52,588 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterPartialFlush Thread=420 (was 411) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:57468 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:34392 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:42944 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:39724 [Waiting for operation #33] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:50644 [Waiting for operation #40] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:42792 [Waiting 
for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1126 (was 1050) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=214 (was 214), ProcessCount=11 (was 11), AvailableMemoryMB=5624 (was 5633) 2024-12-07T12:48:52,589 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1126 is superior to 1024 2024-12-07T12:48:52,599 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterAbortingFlush Thread=420, OpenFileDescriptor=1126, MaxFileDescriptor=1048576, SystemLoadAverage=214, ProcessCount=11, AvailableMemoryMB=5623 2024-12-07T12:48:52,599 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1126 is superior to 1024 2024-12-07T12:48:52,613 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:52,615 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:48:52,615 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:48:52,618 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-18527301, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-18527301, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:52,630 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-18527301/hregion-18527301.1733575732618, exclude list is [], retry=0 2024-12-07T12:48:52,633 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:52,633 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:52,634 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:52,639 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-18527301/hregion-18527301.1733575732618 2024-12-07T12:48:52,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:48:52,640 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 781d5dc502cac91bdd7e1c1eb584ea30, NAME => 'testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:48:52,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741902_1080 (size=68) 2024-12-07T12:48:52,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741902_1080 (size=68) 2024-12-07T12:48:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741902_1080 (size=68) 2024-12-07T12:48:52,649 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:52,650 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,651 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName a 2024-12-07T12:48:52,652 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,652 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,652 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,653 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName b 2024-12-07T12:48:52,654 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,654 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,654 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,656 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName c 2024-12-07T12:48:52,656 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,656 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,656 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,657 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,658 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,658 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,659 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,659 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:52,660 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,662 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:48:52,663 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 781d5dc502cac91bdd7e1c1eb584ea30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68208499, jitterRate=0.016385838389396667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:52,663 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 781d5dc502cac91bdd7e1c1eb584ea30: Writing region info on filesystem at 1733575732649Initializing all the Stores at 1733575732650 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732650Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732650Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732650Cleaning up temporary data from old regions at 1733575732659 (+9 ms)Region opened successfully at 1733575732663 (+4 ms) 2024-12-07T12:48:52,663 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 781d5dc502cac91bdd7e1c1eb584ea30, disabling compactions & flushes 2024-12-07T12:48:52,663 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:48:52,663 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:48:52,663 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 
after waiting 0 ms 2024-12-07T12:48:52,663 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:48:52,665 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:48:52,665 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 781d5dc502cac91bdd7e1c1eb584ea30: Waiting for close lock at 1733575732663Disabling compacts and flushes for region at 1733575732663Disabling writes for close at 1733575732663Writing region close event to WAL at 1733575732665 (+2 ms)Closed at 1733575732665 2024-12-07T12:48:52,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741901_1079 (size=93) 2024-12-07T12:48:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741901_1079 (size=93) 2024-12-07T12:48:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741901_1079 (size=93) 2024-12-07T12:48:52,674 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:48:52,674 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-18527301:(num 1733575732618) 2024-12-07T12:48:52,674 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:48:52,676 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:48:52,688 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, exclude list is [], retry=0 2024-12-07T12:48:52,691 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:48:52,692 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:48:52,692 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:48:52,694 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 2024-12-07T12:48:52,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:48:52,750 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 781d5dc502cac91bdd7e1c1eb584ea30, NAME => 'testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:48:52,753 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,753 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:48:52,753 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,753 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,755 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,756 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName a 2024-12-07T12:48:52,757 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,757 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,758 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,758 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName b 2024-12-07T12:48:52,759 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,759 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,759 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,760 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName c 2024-12-07T12:48:52,760 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:48:52,761 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:48:52,761 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,762 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,763 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,764 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,764 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,764 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:48:52,765 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,766 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 781d5dc502cac91bdd7e1c1eb584ea30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61931262, jitterRate=-0.07715228199958801}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:48:52,766 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:48:52,767 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 781d5dc502cac91bdd7e1c1eb584ea30: Running coprocessor pre-open hook at 1733575732753Writing region info on filesystem at 1733575732753Initializing all the Stores at 1733575732754 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732754Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732755 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575732755Cleaning up temporary data from old regions at 1733575732764 (+9 ms)Running coprocessor post-open hooks at 1733575732766 (+2 ms)Region opened successfully at 1733575732767 (+1 ms) 2024-12-07T12:48:52,780 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 781d5dc502cac91bdd7e1c1eb584ea30 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-07T12:48:52,781 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:53,782 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:48:54,666 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T12:48:54,701 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-07T12:48:54,701 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:54,701 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-07T12:48:54,701 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:54,702 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-07T12:48:54,702 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-07T12:48:54,783 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:55,784 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:56,785 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:48:57,786 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:58,787 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:48:59,789 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:00,790 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:49:01,791 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 781d5dc502cac91bdd7e1c1eb584ea30/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:01,794 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 781d5dc502cac91bdd7e1c1eb584ea30: 2024-12-07T12:49:01,794 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:49:01,810 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 781d5dc502cac91bdd7e1c1eb584ea30: 2024-12-07T12:49:01,810 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 2024-12-07T12:49:01,810 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 781d5dc502cac91bdd7e1c1eb584ea30, disabling compactions & flushes 2024-12-07T12:49:01,810 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:49:01,810 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:49:01,810 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. after waiting 0 ms 2024-12-07T12:49:01,811 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:49:01,811 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 2024-12-07T12:49:01,811 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. 
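The retry counter stops at num=9, the flush is surfaced to the test as the expected failure, and the region is then closed with its memstore still holding 1190 bytes. A minimal sketch of that bounded retry-with-pause shape follows; the attempt budget and the one-second pause are read off the log timestamps, not taken from HStore's actual configuration.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

final class BoundedFlushRetry {
  // Runs a flush action up to maxAttempts times, pausing between failures, and
  // rethrows the last IOException once the budget is exhausted -- the same shape
  // as the "retrying num=8" / "retrying num=9" entries above, where the test then
  // treats the final failure as expected.
  static void runWithRetries(Callable<Void> flushAction, int maxAttempts, long pauseMillis)
      throws Exception {
    IOException last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        flushAction.call();
        return;                    // success, stop retrying
      } catch (IOException e) {
        last = e;                  // corresponds to "retrying num=<attempt>"
        Thread.sleep(pauseMillis); // log entries are roughly one second apart
      }
    }
    if (last != null) {
      throw last;
    }
  }
}
```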
2024-12-07T12:49:01,811 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 781d5dc502cac91bdd7e1c1eb584ea30: Waiting for close lock at 1733575741810Running coprocessor pre-close hooks at 1733575741810Disabling compacts and flushes for region at 1733575741810Disabling writes for close at 1733575741811 (+1 ms)Writing region close event to WAL at 1733575741811Running coprocessor post-close hooks at 1733575741811Closed at 1733575741811 2024-12-07T12:49:01,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741903_1081 (size=3941) 2024-12-07T12:49:01,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741903_1081 (size=3941) 2024-12-07T12:49:01,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741903_1081 (size=3941) 2024-12-07T12:49:01,834 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, size=3.8 K (3941bytes) 2024-12-07T12:49:01,834 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 2024-12-07T12:49:01,835 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 after 1ms 2024-12-07T12:49:01,838 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 took 4ms 2024-12-07T12:49:01,840 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 so closing down 2024-12-07T12:49:01,840 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:49:01,841 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733575732676.temp 2024-12-07T12:49:01,842 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp 2024-12-07T12:49:01,842 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:49:01,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741904_1082 (size=2983) 2024-12-07T12:49:01,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741904_1082 (size=2983) 2024-12-07T12:49:01,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741904_1082 (size=2983) 2024-12-07T12:49:01,851 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-07T12:49:01,853 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 2024-12-07T12:49:01,853 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, size=3.8 K, length=3941, corrupted=false, cancelled=false 2024-12-07T12:49:01,853 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, journal: Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, size=3.8 K (3941bytes) at 1733575741834Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 so closing down at 1733575741840 (+6 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp at 1733575741842 (+2 ms)3 split writer threads finished at 1733575741843 (+1 ms)Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733575741851 (+8 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000004-wal.1733575732676.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 at 1733575741853 (+2 ms)Processed 23 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676, size=3.8 K, length=3941, corrupted=false, cancelled=false at 1733575741853 2024-12-07T12:49:01,855 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575732676 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575732676 2024-12-07T12:49:01,856 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 2024-12-07T12:49:01,856 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:01,858 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, 
logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:01,872 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575741858, exclude list is [], retry=0 2024-12-07T12:49:01,875 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:01,875 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:01,876 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:01,877 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575741858 2024-12-07T12:49:01,878 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:49:01,878 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 781d5dc502cac91bdd7e1c1eb584ea30, NAME => 'testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:01,879 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,879 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:01,879 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,879 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,881 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,882 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName a 2024-12-07T12:49:01,882 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:01,882 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:01,883 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,883 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName b 2024-12-07T12:49:01,883 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:01,884 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:01,884 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,885 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 781d5dc502cac91bdd7e1c1eb584ea30 columnFamilyName c 2024-12-07T12:49:01,885 DEBUG [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:01,885 INFO [StoreOpener-781d5dc502cac91bdd7e1c1eb584ea30-1 {}] regionserver.HStore(327): Store=781d5dc502cac91bdd7e1c1eb584ea30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:01,885 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,886 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,887 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:01,888 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 2024-12-07T12:49:01,891 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 2024-12-07T12:49:01,891 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 781d5dc502cac91bdd7e1c1eb584ea30 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-07T12:49:01,906 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/a/0c90a6282f5a43e4bd7af7ddaa884eee is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733575741802/Put/seqid=0 2024-12-07T12:49:01,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741906_1084 (size=5523) 2024-12-07T12:49:01,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741906_1084 (size=5523) 2024-12-07T12:49:01,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741906_1084 (size=5523) 2024-12-07T12:49:01,914 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/a/0c90a6282f5a43e4bd7af7ddaa884eee 2024-12-07T12:49:01,943 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/b/f5f3816192464f048206ca46b28b040a is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733575741795/Put/seqid=0 
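The recovered-edits bookkeeping above is driven by sequence ids: the split writer names its temp file after the first id in the WAL (0000000000000000004-wal.1733575732676.temp), renames it to the highest id it wrote (0000000000000000026), and replay applies only edits the region has not already flushed, hence "Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26". A rough sketch of that accounting, assuming the 19-digit zero padding visible in the file names (class and method names here are invented for illustration):

```java
import java.util.List;

final class RecoveredEditsSketch {
  // Formats a sequence id the way the recovered.edits names above look:
  // zero-padded to 19 digits, e.g. 26 -> "0000000000000000026".
  static String editsFileName(long maxSeqIdInFile) {
    return String.format("%019d", maxSeqIdInFile);
  }

  // Applies only edits newer than what the region had already flushed, mirroring
  // the "Applied 20, skipped 0, ... maxSequenceIdInLog=26" accounting in the log.
  static long replay(List<Long> editSeqIds, long lastFlushedSeqId) {
    long applied = 0, skipped = 0, maxSeen = lastFlushedSeqId;
    for (long seqId : editSeqIds) {
      maxSeen = Math.max(maxSeen, seqId);
      if (seqId <= lastFlushedSeqId) {
        skipped++;   // already durable in a store file, safe to ignore
      } else {
        applied++;   // a real replay would re-insert the edit into the memstore here
      }
    }
    System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
        applied, skipped, maxSeen);
    return maxSeen;  // the next flush is tagged with this sequence id
  }
}
```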
2024-12-07T12:49:01,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741907_1085 (size=5524) 2024-12-07T12:49:01,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741907_1085 (size=5524) 2024-12-07T12:49:01,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741907_1085 (size=5524) 2024-12-07T12:49:01,950 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/b/f5f3816192464f048206ca46b28b040a 2024-12-07T12:49:01,971 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/c/797f5b564a3546c692eaae48d6bef38b is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733575741800/Put/seqid=0 2024-12-07T12:49:01,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741908_1086 (size=5457) 2024-12-07T12:49:01,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741908_1086 (size=5457) 2024-12-07T12:49:01,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741908_1086 (size=5457) 2024-12-07T12:49:01,978 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/c/797f5b564a3546c692eaae48d6bef38b 2024-12-07T12:49:01,983 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/a/0c90a6282f5a43e4bd7af7ddaa884eee as hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/a/0c90a6282f5a43e4bd7af7ddaa884eee 2024-12-07T12:49:01,988 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/a/0c90a6282f5a43e4bd7af7ddaa884eee, entries=7, sequenceid=26, filesize=5.4 K 2024-12-07T12:49:01,989 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/b/f5f3816192464f048206ca46b28b040a as hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/b/f5f3816192464f048206ca46b28b040a 2024-12-07T12:49:01,995 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/b/f5f3816192464f048206ca46b28b040a, entries=7, sequenceid=26, filesize=5.4 K 2024-12-07T12:49:01,996 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/.tmp/c/797f5b564a3546c692eaae48d6bef38b as hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/c/797f5b564a3546c692eaae48d6bef38b 2024-12-07T12:49:02,001 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/c/797f5b564a3546c692eaae48d6bef38b, entries=6, sequenceid=26, filesize=5.3 K 2024-12-07T12:49:02,002 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 781d5dc502cac91bdd7e1c1eb584ea30 in 111ms, sequenceid=26, compaction requested=false; wal=null 2024-12-07T12:49:02,003 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/0000000000000000026 2024-12-07T12:49:02,004 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:02,004 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:02,004 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:49:02,006 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:02,008 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsAfterAbortingFlush/781d5dc502cac91bdd7e1c1eb584ea30/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-07T12:49:02,009 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 781d5dc502cac91bdd7e1c1eb584ea30; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67057571, jitterRate=-7.643252611160278E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:02,009 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 781d5dc502cac91bdd7e1c1eb584ea30 2024-12-07T12:49:02,009 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 781d5dc502cac91bdd7e1c1eb584ea30: Running coprocessor pre-open hook at 1733575741879Writing region info on filesystem at 1733575741879Initializing all the Stores at 1733575741880 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575741880Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1733575741880Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575741880Obtaining lock to block concurrent updates at 1733575741891 (+11 ms)Preparing flush snapshotting stores in 781d5dc502cac91bdd7e1c1eb584ea30 at 1733575741891Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733575741892 (+1 ms)Flushing stores of testReplayEditsAfterAbortingFlush,,1733575732613.781d5dc502cac91bdd7e1c1eb584ea30. at 1733575741892Flushing 781d5dc502cac91bdd7e1c1eb584ea30/a: creating writer at 1733575741892Flushing 781d5dc502cac91bdd7e1c1eb584ea30/a: appending metadata at 1733575741905 (+13 ms)Flushing 781d5dc502cac91bdd7e1c1eb584ea30/a: closing flushed file at 1733575741905Flushing 781d5dc502cac91bdd7e1c1eb584ea30/b: creating writer at 1733575741930 (+25 ms)Flushing 781d5dc502cac91bdd7e1c1eb584ea30/b: appending metadata at 1733575741943 (+13 ms)Flushing 781d5dc502cac91bdd7e1c1eb584ea30/b: closing flushed file at 1733575741943Flushing 781d5dc502cac91bdd7e1c1eb584ea30/c: creating writer at 1733575741957 (+14 ms)Flushing 781d5dc502cac91bdd7e1c1eb584ea30/c: appending metadata at 1733575741970 (+13 ms)Flushing 781d5dc502cac91bdd7e1c1eb584ea30/c: closing flushed file at 1733575741970Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68c034cb: reopening flushed file at 1733575741982 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@761cc040: reopening flushed file at 1733575741988 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@539f852e: reopening flushed file at 1733575741995 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 781d5dc502cac91bdd7e1c1eb584ea30 in 111ms, sequenceid=26, compaction requested=false; wal=null at 1733575742002 (+7 ms)Cleaning up temporary data from old regions at 1733575742004 (+2 ms)Running coprocessor post-open hooks at 1733575742009 (+5 ms)Region opened successfully at 1733575742009 2024-12-07T12:49:02,027 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testReplayEditsAfterAbortingFlush Thread=421 (was 420) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44168 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58326 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:35814 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44190 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:35824 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741905_1083, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58348 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=1182 (was 1126) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=189 (was 214), ProcessCount=11 (was 11), AvailableMemoryMB=5610 (was 5623) 2024-12-07T12:49:02,027 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1182 is superior to 1024 2024-12-07T12:49:02,040 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testDatalossWhenInputError Thread=421, OpenFileDescriptor=1182, MaxFileDescriptor=1048576, SystemLoadAverage=189, ProcessCount=11, AvailableMemoryMB=5609 2024-12-07T12:49:02,040 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1182 is superior to 1024 2024-12-07T12:49:02,059 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:02,060 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:02,060 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:49:02,063 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-46984365, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-46984365, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:02,073 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-46984365/hregion-46984365.1733575742063, exclude list is [], retry=0 2024-12-07T12:49:02,075 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:02,075 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:02,076 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:02,077 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-46984365/hregion-46984365.1733575742063 2024-12-07T12:49:02,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:49:02,078 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => fd5d3d116dd71a129bf6969f58972116, NAME => 'testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 
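The ResourceChecker lines compare thread and open-file-descriptor counts before and after each test and warn once the descriptor count exceeds 1024. A comparable before/after snapshot can be taken with standard JMX beans; the sketch below is only an analogue of the idea, not HBase's ResourceChecker, and the UnixOperatingSystemMXBean cast only works on Unix-like JVMs.

```java
import java.lang.management.ManagementFactory;
import com.sun.management.UnixOperatingSystemMXBean;

final class ResourceSnapshot {
  final int threads;
  final long openFds;

  ResourceSnapshot() {
    threads = ManagementFactory.getThreadMXBean().getThreadCount();
    Object os = ManagementFactory.getOperatingSystemMXBean();
    // The open-descriptor count is only exposed on Unix-like JVMs; report -1 elsewhere.
    openFds = (os instanceof UnixOperatingSystemMXBean)
        ? ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount()
        : -1L;
  }

  // Prints the same kind of "now (was before)" comparison the ResourceChecker lines show,
  // including a warning once more than 1024 descriptors are open.
  void compareTo(ResourceSnapshot before) {
    System.out.printf("Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
        threads, before.threads, openFds, before.openFds);
    if (openFds > 1024) {
      System.out.println("WARN OpenFileDescriptor=" + openFds + " is superior to 1024");
    }
  }
}
```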
2024-12-07T12:49:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741910_1088 (size=61) 2024-12-07T12:49:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741910_1088 (size=61) 2024-12-07T12:49:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741910_1088 (size=61) 2024-12-07T12:49:02,090 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,091 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,092 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd5d3d116dd71a129bf6969f58972116 columnFamilyName a 2024-12-07T12:49:02,092 DEBUG [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,093 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(327): Store=fd5d3d116dd71a129bf6969f58972116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,093 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,093 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,094 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,094 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,094 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,095 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,097 DEBUG [Time-limited test {}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:49:02,097 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened fd5d3d116dd71a129bf6969f58972116; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68065002, jitterRate=0.01424756646156311}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for fd5d3d116dd71a129bf6969f58972116: Writing region info on filesystem at 1733575742090Initializing all the Stores at 1733575742091 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742091Cleaning up temporary data from old regions at 1733575742094 (+3 ms)Region opened successfully at 1733575742098 (+4 ms) 2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing fd5d3d116dd71a129bf6969f58972116, disabling compactions & flushes 2024-12-07T12:49:02,098 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. after waiting 0 ms 2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,098 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 
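The split-policy line records desiredMaxFileSize together with a jitterRate, and the logged values are consistent with a 64 MB base adjusted by that rate: 67108864 plus 67108864 × 0.01424756… gives 68065002 here, and the region reopened below logs 73873544 for jitterRate 0.10080…. A small sketch of that arithmetic; the 64 MB base is inferred from the numbers, not read from the test's configuration.

```java
final class SplitSizeJitter {
  // Reconstructs the logged desiredMaxFileSize as base + base * jitterRate; this is an
  // inference from the logged numbers, not a copy of the split policy's actual code.
  static long desiredMaxFileSize(long baseFileSize, double jitterRate) {
    return baseFileSize + (long) (baseFileSize * jitterRate);
  }

  public static void main(String[] args) {
    long base = 64L * 1024 * 1024; // 67108864 bytes
    // ~68065002 and ~73873544, matching the two openings in the log (the last digit
    // can shift by one depending on floating-point truncation).
    System.out.println(desiredMaxFileSize(base, 0.01424756646156311));
    System.out.println(desiredMaxFileSize(base, 0.10080158710479736));
  }
}
```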
2024-12-07T12:49:02,098 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for fd5d3d116dd71a129bf6969f58972116: Waiting for close lock at 1733575742098Disabling compacts and flushes for region at 1733575742098Disabling writes for close at 1733575742098Writing region close event to WAL at 1733575742098Closed at 1733575742098 2024-12-07T12:49:02,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741909_1087 (size=93) 2024-12-07T12:49:02,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741909_1087 (size=93) 2024-12-07T12:49:02,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741909_1087 (size=93) 2024-12-07T12:49:02,103 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:49:02,103 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-46984365:(num 1733575742063) 2024-12-07T12:49:02,103 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:02,105 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:02,121 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, exclude list is [], retry=0 2024-12-07T12:49:02,123 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:02,123 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:02,124 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:02,125 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 2024-12-07T12:49:02,125 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:49:02,125 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => fd5d3d116dd71a129bf6969f58972116, NAME => 'testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:02,125 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,125 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,125 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,127 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,128 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd5d3d116dd71a129bf6969f58972116 columnFamilyName a 2024-12-07T12:49:02,128 DEBUG [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,128 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(327): Store=fd5d3d116dd71a129bf6969f58972116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,128 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,129 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,130 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,130 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,130 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,132 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,133 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened fd5d3d116dd71a129bf6969f58972116; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73873544, jitterRate=0.10080158710479736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:49:02,133 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
fd5d3d116dd71a129bf6969f58972116: Writing region info on filesystem at 1733575742126Initializing all the Stores at 1733575742127 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742127Cleaning up temporary data from old regions at 1733575742130 (+3 ms)Region opened successfully at 1733575742133 (+3 ms) 2024-12-07T12:49:02,142 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing fd5d3d116dd71a129bf6969f58972116, disabling compactions & flushes 2024-12-07T12:49:02,142 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,142 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,142 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. after waiting 0 ms 2024-12-07T12:49:02,142 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,142 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 2024-12-07T12:49:02,143 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 
2024-12-07T12:49:02,143 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for fd5d3d116dd71a129bf6969f58972116: Waiting for close lock at 1733575742142Disabling compacts and flushes for region at 1733575742142Disabling writes for close at 1733575742142Writing region close event to WAL at 1733575742142Closed at 1733575742143 (+1 ms) 2024-12-07T12:49:02,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741911_1089 (size=1623) 2024-12-07T12:49:02,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741911_1089 (size=1623) 2024-12-07T12:49:02,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741911_1089 (size=1623) 2024-12-07T12:49:02,161 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, size=1.6 K (1623bytes) 2024-12-07T12:49:02,161 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 2024-12-07T12:49:02,161 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 after 0ms 2024-12-07T12:49:02,163 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 took 2ms 2024-12-07T12:49:02,165 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 so closing down 2024-12-07T12:49:02,165 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:49:02,166 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733575742105.temp 2024-12-07T12:49:02,167 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp 2024-12-07T12:49:02,167 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:49:02,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741912_1090 (size=1623) 2024-12-07T12:49:02,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741912_1090 (size=1623) 2024-12-07T12:49:02,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741912_1090 (size=1623) 2024-12-07T12:49:02,173 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-07T12:49:02,174 
INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp to hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 2024-12-07T12:49:02,174 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 10 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, size=1.6 K, length=1623, corrupted=false, cancelled=false 2024-12-07T12:49:02,174 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, journal: Splitting hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, size=1.6 K (1623bytes) at 1733575742161Finishing writing output for hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 so closing down at 1733575742165 (+4 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp at 1733575742167 (+2 ms)3 split writer threads finished at 1733575742167Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733575742173 (+6 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000003-wal.1733575742105.temp to hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 at 1733575742174 (+1 ms)Processed 10 edits across 1 Regions in 10 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105, size=1.6 K, length=1623, corrupted=false, cancelled=false at 1733575742174 2024-12-07T12:49:02,176 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742105 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575742105 2024-12-07T12:49:02,177 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 2024-12-07T12:49:02,509 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:02,511 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:02,522 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742511, exclude list is [], retry=0 2024-12-07T12:49:02,524 DEBUG 
[TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:02,525 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:02,525 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:02,526 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742511 2024-12-07T12:49:02,527 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:49:02,527 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => fd5d3d116dd71a129bf6969f58972116, NAME => 'testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:02,527 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,527 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,527 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,530 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,531 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd5d3d116dd71a129bf6969f58972116 columnFamilyName a 2024-12-07T12:49:02,531 DEBUG [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,532 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 
{}] regionserver.HStore(327): Store=fd5d3d116dd71a129bf6969f58972116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,532 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,533 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,535 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,535 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 2024-12-07T12:49:02,538 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 2024-12-07T12:49:02,538 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing fd5d3d116dd71a129bf6969f58972116 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-07T12:49:02,556 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/.tmp/a/434f3044245c424b8d55c0b23a1ea62e is 79, key is testDatalossWhenInputError/a:x0/1733575742133/Put/seqid=0 2024-12-07T12:49:02,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741914_1092 (size=5808) 2024-12-07T12:49:02,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741914_1092 (size=5808) 2024-12-07T12:49:02,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741914_1092 (size=5808) 2024-12-07T12:49:02,565 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/.tmp/a/434f3044245c424b8d55c0b23a1ea62e 2024-12-07T12:49:02,576 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/.tmp/a/434f3044245c424b8d55c0b23a1ea62e as hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/a/434f3044245c424b8d55c0b23a1ea62e 2024-12-07T12:49:02,586 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/a/434f3044245c424b8d55c0b23a1ea62e, entries=10, sequenceid=12, filesize=5.7 K 2024-12-07T12:49:02,586 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for fd5d3d116dd71a129bf6969f58972116 in 48ms, sequenceid=12, compaction 
requested=false; wal=null 2024-12-07T12:49:02,586 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/0000000000000000012 2024-12-07T12:49:02,587 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,587 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,590 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,593 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-07T12:49:02,594 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened fd5d3d116dd71a129bf6969f58972116; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61369802, jitterRate=-0.08551868796348572}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:49:02,594 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for fd5d3d116dd71a129bf6969f58972116: Writing region info on filesystem at 1733575742527Initializing all the Stores at 1733575742530 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742530Obtaining lock to block concurrent updates at 1733575742538 (+8 ms)Preparing flush snapshotting stores in fd5d3d116dd71a129bf6969f58972116 at 1733575742538Finished memstore snapshotting testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733575742538Flushing stores of testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116. 
at 1733575742538Flushing fd5d3d116dd71a129bf6969f58972116/a: creating writer at 1733575742538Flushing fd5d3d116dd71a129bf6969f58972116/a: appending metadata at 1733575742556 (+18 ms)Flushing fd5d3d116dd71a129bf6969f58972116/a: closing flushed file at 1733575742556Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4efd2aa: reopening flushed file at 1733575742574 (+18 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for fd5d3d116dd71a129bf6969f58972116 in 48ms, sequenceid=12, compaction requested=false; wal=null at 1733575742586 (+12 ms)Cleaning up temporary data from old regions at 1733575742587 (+1 ms)Region opened successfully at 1733575742594 (+7 ms) 2024-12-07T12:49:02,597 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => fd5d3d116dd71a129bf6969f58972116, NAME => 'testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:02,598 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733575742059.fd5d3d116dd71a129bf6969f58972116.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,598 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,598 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,599 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,600 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd5d3d116dd71a129bf6969f58972116 columnFamilyName a 2024-12-07T12:49:02,600 DEBUG [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,606 DEBUG [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/a/434f3044245c424b8d55c0b23a1ea62e 2024-12-07T12:49:02,606 INFO [StoreOpener-fd5d3d116dd71a129bf6969f58972116-1 {}] regionserver.HStore(327): Store=fd5d3d116dd71a129bf6969f58972116/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,606 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,607 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,608 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,609 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,609 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,611 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for fd5d3d116dd71a129bf6969f58972116 2024-12-07T12:49:02,614 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testDatalossWhenInputError/fd5d3d116dd71a129bf6969f58972116/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-07T12:49:02,615 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened fd5d3d116dd71a129bf6969f58972116; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69092294, jitterRate=0.029555410146713257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:49:02,615 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for fd5d3d116dd71a129bf6969f58972116: Writing region info on filesystem at 1733575742598Initializing all the Stores at 1733575742599 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742599Cleaning up temporary data from old regions at 1733575742609 (+10 ms)Region opened successfully at 1733575742615 (+6 ms) 2024-12-07T12:49:02,636 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testDatalossWhenInputError Thread=431 (was 421) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:35884 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:35814 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44190 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58384 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44220 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58348 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1122527667-172.17.0.2-1733575711591:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1264 (was 1182) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=189 (was 189), ProcessCount=11 (was 11), AvailableMemoryMB=5604 (was 5609) 2024-12-07T12:49:02,636 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-12-07T12:49:02,648 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testCompactedBulkLoadedFiles Thread=431, OpenFileDescriptor=1264, MaxFileDescriptor=1048576, SystemLoadAverage=189, ProcessCount=11, AvailableMemoryMB=5603 2024-12-07T12:49:02,648 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-12-07T12:49:02,665 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:02,667 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:02,667 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:49:02,670 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-88793570, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-88793570, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:02,685 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-88793570/hregion-88793570.1733575742670, exclude list is [], retry=0 2024-12-07T12:49:02,687 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:02,688 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:02,688 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:02,689 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-88793570/hregion-88793570.1733575742670 2024-12-07T12:49:02,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:49:02,690 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 602af214f7e58feccdc4aabe6121bbed, NAME => 'testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:49:02,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741916_1094 (size=63) 2024-12-07T12:49:02,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741916_1094 (size=63) 2024-12-07T12:49:02,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741916_1094 (size=63) 2024-12-07T12:49:02,701 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,702 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,704 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName a 2024-12-07T12:49:02,704 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,704 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,705 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,706 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName b 2024-12-07T12:49:02,706 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,707 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,707 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,708 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName c 2024-12-07T12:49:02,708 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,708 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,709 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,709 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,710 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,711 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,711 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,711 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:49:02,712 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,714 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:49:02,715 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 602af214f7e58feccdc4aabe6121bbed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61808319, jitterRate=-0.07898427546024323}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:02,715 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 602af214f7e58feccdc4aabe6121bbed: Writing region info on filesystem at 1733575742701Initializing all the Stores at 1733575742702 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742702Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742702Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742702Cleaning up temporary data from old regions at 1733575742711 (+9 ms)Region opened successfully at 1733575742715 (+4 ms) 2024-12-07T12:49:02,715 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 602af214f7e58feccdc4aabe6121bbed, disabling compactions & flushes 2024-12-07T12:49:02,715 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:02,716 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:02,716 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 
after waiting 0 ms 2024-12-07T12:49:02,716 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:02,716 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:02,716 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 602af214f7e58feccdc4aabe6121bbed: Waiting for close lock at 1733575742715Disabling compacts and flushes for region at 1733575742715Disabling writes for close at 1733575742716 (+1 ms)Writing region close event to WAL at 1733575742716Closed at 1733575742716 2024-12-07T12:49:02,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741915_1093 (size=93) 2024-12-07T12:49:02,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741915_1093 (size=93) 2024-12-07T12:49:02,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741915_1093 (size=93) 2024-12-07T12:49:02,721 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:49:02,721 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-88793570:(num 1733575742670) 2024-12-07T12:49:02,721 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:02,723 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:02,735 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, exclude list is [], retry=0 2024-12-07T12:49:02,738 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:02,738 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:02,738 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:02,740 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 2024-12-07T12:49:02,740 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693)] 2024-12-07T12:49:02,740 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 602af214f7e58feccdc4aabe6121bbed, NAME => 'testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:02,740 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:02,740 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,740 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,742 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,743 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName a 2024-12-07T12:49:02,743 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,743 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,743 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,744 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
602af214f7e58feccdc4aabe6121bbed columnFamilyName b 2024-12-07T12:49:02,744 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,745 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,745 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,746 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName c 2024-12-07T12:49:02,746 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:02,746 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:02,747 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,747 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,748 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,749 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,749 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,750 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
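The store-open entries above repeat the same column family attributes for 'a', 'b' and 'c' that were dumped earlier in the region open journal (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, no compression or data block encoding). The log does not show the test code that created this table, but a descriptor with those attributes could be assembled with the public HBase 2.x client API roughly as in the sketch below; the table name is taken from the log, everything else is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  // Builds a descriptor matching the attributes printed in the region open journal:
  // VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, COMPRESSION => 'NONE'.
  static TableDescriptor buildDescriptor() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testCompactedBulkLoadedFiles"));
    for (String family : new String[] { "a", "b", "c" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .setCompressionType(Compression.Algorithm.NONE)
              .build());
    }
    return builder.build();
  }
}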
2024-12-07T12:49:02,751 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:02,752 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 602af214f7e58feccdc4aabe6121bbed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65050231, jitterRate=-0.030676022171974182}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:02,753 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 602af214f7e58feccdc4aabe6121bbed: Writing region info on filesystem at 1733575742741Initializing all the Stores at 1733575742741Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742741Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742742 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575742742Cleaning up temporary data from old regions at 1733575742749 (+7 ms)Region opened successfully at 1733575742753 (+4 ms) 2024-12-07T12:49:02,757 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733575742757/Put/seqid=0 2024-12-07T12:49:02,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741918_1096 (size=4875) 2024-12-07T12:49:02,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741918_1096 (size=4875) 2024-12-07T12:49:02,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741918_1096 (size=4875) 2024-12-07T12:49:02,767 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733575742767/Put/seqid=0 2024-12-07T12:49:02,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741919_1097 (size=4875) 2024-12-07T12:49:02,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741919_1097 (size=4875) 2024-12-07T12:49:02,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741919_1097 (size=4875) 2024-12-07T12:49:02,777 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733575742777/Put/seqid=0 2024-12-07T12:49:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741920_1098 (size=4875) 2024-12-07T12:49:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741920_1098 (size=4875) 2024-12-07T12:49:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741920_1098 (size=4875) 2024-12-07T12:49:02,787 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:02,790 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-07T12:49:02,790 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T12:49:02,791 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:02,794 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-07T12:49:02,794 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T12:49:02,794 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:02,798 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-07T12:49:02,798 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-07T12:49:02,798 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 602af214f7e58feccdc4aabe6121bbed 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-07T12:49:02,812 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp/a/7e5aff05eae046618d05873ab9a3c8d4 is 55, key is testCompactedBulkLoadedFiles/a:a/1733575742753/Put/seqid=0 2024-12-07T12:49:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741921_1099 (size=5107) 2024-12-07T12:49:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741921_1099 (size=5107) 2024-12-07T12:49:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741921_1099 (size=5107) 2024-12-07T12:49:03,220 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp/a/7e5aff05eae046618d05873ab9a3c8d4 2024-12-07T12:49:03,227 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp/a/7e5aff05eae046618d05873ab9a3c8d4 as 
hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4 2024-12-07T12:49:03,233 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4, entries=1, sequenceid=4, filesize=5.0 K 2024-12-07T12:49:03,234 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 602af214f7e58feccdc4aabe6121bbed in 436ms, sequenceid=4, compaction requested=false 2024-12-07T12:49:03,234 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 602af214f7e58feccdc4aabe6121bbed: 2024-12-07T12:49:03,236 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ 2024-12-07T12:49:03,237 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ 2024-12-07T12:49:03,238 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ 2024-12-07T12:49:03,238 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile0 into 602af214f7e58feccdc4aabe6121bbed/a as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ - updating store file list. 2024-12-07T12:49:03,242 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:03,242 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ into 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:03,243 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile0 into 602af214f7e58feccdc4aabe6121bbed/a (new location: hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_) 2024-12-07T12:49:03,244 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile1 into 602af214f7e58feccdc4aabe6121bbed/a as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ - updating store file list. 
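The entries above show three standalone HFiles (hfile0 to hfile2, 4875 bytes each) being validated against the region bounds and then moved into store 'a' as _SeqId_4_ files. The test drives this through region-internal bulk-load hooks that the log only hints at; purely as an illustration, writing such an HFile and handing it to the client-side bulk-load tool could look like the sketch below. The row format (%03d in family a, qualifier a) and table name mirror the log; the helper names, and the use of BulkLoadHFiles (which needs a live cluster and table, unlike the region-level path this test uses), are assumptions.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;

public class BulkLoadSketch {
  // Write a small HFile whose keys look like the ones in the log (rows 000, 001, ... in a:a).
  static void writeHFile(Configuration conf, FileSystem fs, Path path, int startRow, int rows)
      throws Exception {
    HFileContext context = new HFileContextBuilder().withBlockSize(64 * 1024).build();
    try (HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withPath(fs, path).withFileContext(context).create()) {
      long ts = System.currentTimeMillis();
      for (int i = startRow; i < startRow + rows; i++) {
        byte[] row = Bytes.toBytes(String.format("%03d", i));
        writer.append(new KeyValue(row, Bytes.toBytes("a"), Bytes.toBytes("a"), ts, row));
      }
    }
  }

  // Hand the finished file to the bulk-load tool, mapping it to column family 'a'.
  static void bulkLoad(Configuration conf, Path hfile) throws Exception {
    Map<byte[], List<Path>> family2Files =
        Collections.singletonMap(Bytes.toBytes("a"), Collections.singletonList(hfile));
    BulkLoadHFiles.create(conf)
        .bulkLoad(TableName.valueOf("testCompactedBulkLoadedFiles"), family2Files);
  }
}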
2024-12-07T12:49:03,247 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:03,248 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ into 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:03,248 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile1 into 602af214f7e58feccdc4aabe6121bbed/a (new location: hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_) 2024-12-07T12:49:03,249 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile2 into 602af214f7e58feccdc4aabe6121bbed/a as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ - updating store file list. 2024-12-07T12:49:03,253 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 662ad85c083e44a1a9f82a9c524422cf_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:03,253 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ into 602af214f7e58feccdc4aabe6121bbed/a 2024-12-07T12:49:03,253 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43841/hbase/testCompactedBulkLoadedFiles/hfile2 into 602af214f7e58feccdc4aabe6121bbed/a (new location: hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_) 2024-12-07T12:49:03,260 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T12:49:03,260 DEBUG [Time-limited test {}] regionserver.HStore(1541): 602af214f7e58feccdc4aabe6121bbed/a is initiating major compaction (all files) 2024-12-07T12:49:03,260 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 602af214f7e58feccdc4aabe6121bbed/a in testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 
2024-12-07T12:49:03,260 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_] into tmpdir=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp, totalSize=19.3 K 2024-12-07T12:49:03,261 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7e5aff05eae046618d05873ab9a3c8d4, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733575742753 2024-12-07T12:49:03,261 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T12:49:03,262 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T12:49:03,262 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 662ad85c083e44a1a9f82a9c524422cf_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-07T12:49:03,274 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp/a/e5a1967cb2a242f69bef35c541cc5d18 is 55, key is testCompactedBulkLoadedFiles/a:a/1733575742753/Put/seqid=0 2024-12-07T12:49:03,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741922_1100 (size=6154) 2024-12-07T12:49:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741922_1100 (size=6154) 2024-12-07T12:49:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741922_1100 (size=6154) 2024-12-07T12:49:03,286 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/.tmp/a/e5a1967cb2a242f69bef35c541cc5d18 as hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/e5a1967cb2a242f69bef35c541cc5d18 2024-12-07T12:49:03,292 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 602af214f7e58feccdc4aabe6121bbed/a of 602af214f7e58feccdc4aabe6121bbed into e5a1967cb2a242f69bef35c541cc5d18(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
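At this point store 'a' holds four files (the flushed 7e5aff05... file plus the three bulk-loaded _SeqId_4_ files), and the policy selects all of them for a major compaction whose single output is the ~6.0 K file e5a1967cb2a242f69bef35c541cc5d18. The test triggers the compaction directly on the region; an equivalent request through the public Admin API, polling until the server reports no compaction running, might look like the sketch below (configuration and connection handling are assumed).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MajorCompactSketch {
  // Request a major compaction of family 'a' and wait for the region server to finish it.
  static void majorCompactFamilyA(Configuration conf) throws Exception {
    TableName table = TableName.valueOf("testCompactedBulkLoadedFiles");
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      admin.majorCompact(table, Bytes.toBytes("a"));   // asynchronous request
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(100);                             // poll until no compaction is running
      }
    }
  }
}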
2024-12-07T12:49:03,292 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 602af214f7e58feccdc4aabe6121bbed: 2024-12-07T12:49:03,292 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-07T12:49:03,292 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-07T12:49:03,318 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, size=0 (0bytes) 2024-12-07T12:49:03,318 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 might be still open, length is 0 2024-12-07T12:49:03,318 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 2024-12-07T12:49:03,319 WARN [IPC Server handler 1 on default port 43841 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095 2024-12-07T12:49:03,319 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 after 1ms 2024-12-07T12:49:03,436 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:49:04,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-07T12:49:04,536 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-07T12:49:04,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-07T12:49:04,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-07T12:49:06,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58438 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:36341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58438 dst: /127.0.0.1:36341 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36341 remote=/127.0.0.1:58438]. Total timeout mills is 60000, 57065 millis timeout left. 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:06,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:35918 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35918 dst: /127.0.0.1:46077 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:06,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44246 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44246 dst: /127.0.0.1:35169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741917_1101 (size=1659) 2024-12-07T12:49:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741917_1101 (size=1659) 2024-12-07T12:49:06,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741917_1101 (size=1659) 2024-12-07T12:49:07,320 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 after 4002ms 2024-12-07T12:49:07,328 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 took 4010ms 2024-12-07T12:49:07,331 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723; continuing. 
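The splitter cannot read the WAL until the writer's HDFS lease is released: attempt 0 fails while "Lease recovery is in progress", the DataNode write pipelines for blk_1073741917 are torn down with the interrupted and premature-EOF errors above, and the lease is reported recovered about four seconds later. RecoverLeaseFSUtils, referenced in these entries, is essentially a retry loop around HDFS lease recovery; the gist of that pattern on a plain DistributedFileSystem is sketched below. The timeout and sleep interval are illustrative, and the cast assumes the path really lives on HDFS.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Keep calling recoverLease() until the NameNode reports the file as closed.
  static void recoverLease(Configuration conf, Path wal) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
    long deadline = System.currentTimeMillis() + 60_000L;   // illustrative 60s budget
    while (!dfs.recoverLease(wal)) {                        // false => recovery still pending
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out recovering lease on " + wal);
      }
      Thread.sleep(1_000L);                                 // back off between attempts
    }
  }
}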
2024-12-07T12:49:07,331 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 so closing down 2024-12-07T12:49:07,331 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:49:07,333 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733575742723.temp 2024-12-07T12:49:07,335 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp 2024-12-07T12:49:07,335 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:49:07,336 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T12:49:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741923_1102 (size=698) 2024-12-07T12:49:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741923_1102 (size=698) 2024-12-07T12:49:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741923_1102 (size=698) 2024-12-07T12:49:07,344 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-07T12:49:07,346 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp to hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 2024-12-07T12:49:07,346 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T12:49:07,346 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, journal: Splitting hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, size=0 (0bytes) at 1733575743318Finishing writing output for hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 so closing down at 1733575747331 (+4013 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp at 1733575747335 (+4 ms)3 split writer threads finished at 1733575747335Closed recovered edits writer 
path=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733575747344 (+9 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000003-wal.1733575742723.temp to hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 at 1733575747346 (+2 ms)Processed 5 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723, size=0, length=0, corrupted=false, cancelled=false at 1733575747346 2024-12-07T12:49:07,348 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575742723 2024-12-07T12:49:07,348 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 2024-12-07T12:49:07,349 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:07,350 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:07,369 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575747351, exclude list is [], retry=0 2024-12-07T12:49:07,372 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:07,372 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:07,372 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:07,373 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575747351 2024-12-07T12:49:07,374 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:49:07,374 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 602af214f7e58feccdc4aabe6121bbed, NAME => 'testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:07,374 DEBUG 
[Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:07,374 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,374 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,376 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,376 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName a 2024-12-07T12:49:07,376 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,383 DEBUG [StoreFileOpener-602af214f7e58feccdc4aabe6121bbed-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:07,383 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ 2024-12-07T12:49:07,387 DEBUG [StoreFileOpener-602af214f7e58feccdc4aabe6121bbed-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 662ad85c083e44a1a9f82a9c524422cf_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:07,387 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ 2024-12-07T12:49:07,391 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4 2024-12-07T12:49:07,396 DEBUG [StoreFileOpener-602af214f7e58feccdc4aabe6121bbed-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-07T12:49:07,396 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ 2024-12-07T12:49:07,400 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/e5a1967cb2a242f69bef35c541cc5d18 2024-12-07T12:49:07,400 WARN [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@fae3427 2024-12-07T12:49:07,400 WARN [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@fae3427 2024-12-07T12:49:07,400 WARN [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@fae3427 2024-12-07T12:49:07,400 WARN [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@fae3427 2024-12-07T12:49:07,401 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_] to archive 2024-12-07T12:49:07,401 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
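Because the reopened store already contains the compacted output file, the opener treats the four older files as compacted away, clears them from the store and hands them to the HFileArchiver, which moves them under the table's archive directory in the entries that follow. One way to confirm where they ended up is to list that per-family archive path with the plain Hadoop FileSystem API, as sketched below; the NameNode address and region hash are copied from this particular run and would differ elsewhere.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveListingSketch {
  // Archive layout mirrored from the log: <rootdir>/archive/data/<namespace>/<table>/<region>/<family>
  static void listArchivedStoreFiles(Configuration conf) throws Exception {
    Path archiveDir = new Path("hdfs://localhost:43841/hbase/archive/data/default/"
        + "testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a");
    FileSystem fs = archiveDir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(archiveDir)) {
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}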
2024-12-07T12:49:07,403 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ to hdfs://localhost:43841/hbase/archive/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_ 2024-12-07T12:49:07,404 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ to hdfs://localhost:43841/hbase/archive/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/662ad85c083e44a1a9f82a9c524422cf_SeqId_4_ 2024-12-07T12:49:07,405 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4 to hdfs://localhost:43841/hbase/archive/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/7e5aff05eae046618d05873ab9a3c8d4 2024-12-07T12:49:07,406 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ to hdfs://localhost:43841/hbase/archive/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/a/d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_ 2024-12-07T12:49:07,406 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,406 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,407 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName b 2024-12-07T12:49:07,407 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,408 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] 
regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,408 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,408 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 602af214f7e58feccdc4aabe6121bbed columnFamilyName c 2024-12-07T12:49:07,408 DEBUG [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,409 INFO [StoreOpener-602af214f7e58feccdc4aabe6121bbed-1 {}] regionserver.HStore(327): Store=602af214f7e58feccdc4aabe6121bbed/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,409 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,410 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,411 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,411 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 2024-12-07T12:49:07,414 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 602af214f7e58feccdc4aabe6121bbed : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "602af214f7e58feccdc4aabe6121bbed" family_name: "a" compaction_input: "7e5aff05eae046618d05873ab9a3c8d4" compaction_input: "d487ff825d7f4e28bc3181f5fe8f3018_SeqId_4_" compaction_input: "15a330d6c758429fb846a1e7e7c8ea76_SeqId_4_" compaction_input: "662ad85c083e44a1a9f82a9c524422cf_SeqId_4_" compaction_output: "e5a1967cb2a242f69bef35c541cc5d18" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed." 
with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-07T12:49:07,414 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-07T12:49:07,414 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 2024-12-07T12:49:07,415 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/0000000000000000008 2024-12-07T12:49:07,416 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,416 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,417 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:49:07,418 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 602af214f7e58feccdc4aabe6121bbed 2024-12-07T12:49:07,420 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testCompactedBulkLoadedFiles/602af214f7e58feccdc4aabe6121bbed/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T12:49:07,421 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 602af214f7e58feccdc4aabe6121bbed; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64325921, jitterRate=-0.04146908223628998}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:07,421 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 602af214f7e58feccdc4aabe6121bbed: Writing region info on filesystem at 1733575747374Initializing all the Stores at 1733575747375 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747375Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747375Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747375Cleaning up temporary data from old regions at 1733575747416 (+41 ms)Region opened successfully at 1733575747421 (+5 ms) 2024-12-07T12:49:07,423 DEBUG [Time-limited test {}] 
regionserver.HRegion(1722): Closing 602af214f7e58feccdc4aabe6121bbed, disabling compactions & flushes 2024-12-07T12:49:07,423 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:07,423 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:07,423 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. after waiting 0 ms 2024-12-07T12:49:07,423 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:07,424 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733575742666.602af214f7e58feccdc4aabe6121bbed. 2024-12-07T12:49:07,424 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 602af214f7e58feccdc4aabe6121bbed: Waiting for close lock at 1733575747423Disabling compacts and flushes for region at 1733575747423Disabling writes for close at 1733575747423Writing region close event to WAL at 1733575747424 (+1 ms)Closed at 1733575747424 2024-12-07T12:49:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741924_1103 (size=93) 2024-12-07T12:49:07,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741924_1103 (size=93) 2024-12-07T12:49:07,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741924_1103 (size=93) 2024-12-07T12:49:07,429 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:49:07,429 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733575747351) 2024-12-07T12:49:07,443 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testCompactedBulkLoadedFiles Thread=443 (was 431) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:43841 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:43841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_689300076_22 at /127.0.0.1:36004 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_689300076_22 at /127.0.0.1:44314 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_689300076_22 at /127.0.0.1:58514 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1346 (was 1264) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=173 (was 189), ProcessCount=11 (was 11), AvailableMemoryMB=5591 (was 5603) 2024-12-07T12:49:07,444 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-12-07T12:49:07,459 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplay#testReplayEditsWrittenViaHRegion Thread=443, OpenFileDescriptor=1346, MaxFileDescriptor=1048576, SystemLoadAverage=173, ProcessCount=11, AvailableMemoryMB=5590 2024-12-07T12:49:07,460 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-12-07T12:49:07,477 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:07,479 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T12:49:07,479 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T12:49:07,482 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-40879071, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/hregion-40879071, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:07,494 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-40879071/hregion-40879071.1733575747482, exclude list is [], retry=0 2024-12-07T12:49:07,496 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:07,497 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:07,497 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:07,498 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-40879071/hregion-40879071.1733575747482 2024-12-07T12:49:07,499 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:49:07,499 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 02acebcd31199dcbe633360df34faf13, NAME => 'testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43841/hbase 2024-12-07T12:49:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741926_1105 (size=67) 2024-12-07T12:49:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741926_1105 (size=67) 2024-12-07T12:49:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741926_1105 (size=67) 2024-12-07T12:49:07,511 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:07,513 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,514 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName a 2024-12-07T12:49:07,514 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,515 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,515 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,516 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName b 2024-12-07T12:49:07,516 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,517 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,517 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,518 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName c 2024-12-07T12:49:07,518 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,518 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,518 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,519 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,519 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,520 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,520 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,521 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T12:49:07,522 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:49:07,524 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 02acebcd31199dcbe633360df34faf13; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63013083, jitterRate=-0.06103189289569855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 02acebcd31199dcbe633360df34faf13: Writing region info on filesystem at 1733575747512Initializing all the Stores at 1733575747512Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747512Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747513 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747513Cleaning up temporary data from old regions at 1733575747520 (+7 ms)Region opened successfully at 1733575747524 (+4 ms) 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 02acebcd31199dcbe633360df34faf13, disabling compactions & flushes 2024-12-07T12:49:07,524 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 
after waiting 0 ms 2024-12-07T12:49:07,524 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,525 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,525 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 02acebcd31199dcbe633360df34faf13: Waiting for close lock at 1733575747524Disabling compacts and flushes for region at 1733575747524Disabling writes for close at 1733575747524Writing region close event to WAL at 1733575747525 (+1 ms)Closed at 1733575747525 2024-12-07T12:49:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741925_1104 (size=93) 2024-12-07T12:49:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741925_1104 (size=93) 2024-12-07T12:49:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741925_1104 (size=93) 2024-12-07T12:49:07,529 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:49:07,529 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-40879071:(num 1733575747482) 2024-12-07T12:49:07,529 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:07,530 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:07,543 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, exclude list is [], retry=0 2024-12-07T12:49:07,545 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:07,546 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:07,546 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:07,547 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 2024-12-07T12:49:07,547 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:49:07,547 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 02acebcd31199dcbe633360df34faf13, NAME => 'testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:07,547 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:07,547 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,547 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,549 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,549 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName a 2024-12-07T12:49:07,549 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,550 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,550 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,551 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName b 2024-12-07T12:49:07,551 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,551 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,551 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,552 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName c 2024-12-07T12:49:07,552 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,552 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,553 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,553 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,554 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,555 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,555 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,556 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
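The block above creates and reopens testReplayEditsWrittenViaHRegion with three column families a, b and c (VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 64 KB, no compression), and notes that with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, FlushLargeStoresPolicy falls back to the memstore flush size divided by the number of families: 128 MB / 3 = 44739242 bytes, the "42.7 M" and flushSizeLowerBound=44739242 values printed in the log. The sketch below builds an equivalent table descriptor with the public HBase 2.x builder API; it is not the test's own setup code, and the flush-bound arithmetic is reproduced only to show where the number comes from.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
        // Families a, b and c with the settings reported in the log:
        // VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 64 KB, no compression or encoding.
        for (String family : new String[] { "a", "b", "c" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build());
        }
        TableDescriptor td = builder.build();
        System.out.println(td);

        // FlushLargeStoresPolicy fallback seen in the log: with no per-family
        // lower bound configured, it uses memstore flush size / number of families.
        long memstoreFlushSize = 128L * 1024 * 1024; // default 128 MB
        System.out.println(memstoreFlushSize / 3);   // 44739242 (~42.7 MB)
      }
    }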
2024-12-07T12:49:07,557 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,558 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 02acebcd31199dcbe633360df34faf13; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60442469, jitterRate=-0.09933702647686005}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:07,558 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 02acebcd31199dcbe633360df34faf13: Writing region info on filesystem at 1733575747548Initializing all the Stores at 1733575747548Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747548Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747548Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747548Cleaning up temporary data from old regions at 1733575747555 (+7 ms)Region opened successfully at 1733575747558 (+3 ms) 2024-12-07T12:49:07,565 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 02acebcd31199dcbe633360df34faf13 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-07T12:49:07,579 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/e4f77f1cc6aa4d74bcde48326002db17 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733575747558/Put/seqid=0 2024-12-07T12:49:07,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741928_1107 (size=5958) 2024-12-07T12:49:07,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741928_1107 (size=5958) 2024-12-07T12:49:07,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741928_1107 (size=5958) 2024-12-07T12:49:07,585 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/e4f77f1cc6aa4d74bcde48326002db17 2024-12-07T12:49:07,589 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/e4f77f1cc6aa4d74bcde48326002db17 as 
hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/e4f77f1cc6aa4d74bcde48326002db17 2024-12-07T12:49:07,593 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/e4f77f1cc6aa4d74bcde48326002db17, entries=10, sequenceid=13, filesize=5.8 K 2024-12-07T12:49:07,595 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 02acebcd31199dcbe633360df34faf13 in 29ms, sequenceid=13, compaction requested=false 2024-12-07T12:49:07,595 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 02acebcd31199dcbe633360df34faf13: 2024-12-07T12:49:07,612 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 02acebcd31199dcbe633360df34faf13, disabling compactions & flushes 2024-12-07T12:49:07,612 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,612 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,612 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. after waiting 0 ms 2024-12-07T12:49:07,612 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,612 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:07,613 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 
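The entries above show the region reopened on wal.1733575747530, ten cells flushed from column family a (870 B, written to a/e4f77f1cc6aa4d74bcde48326002db17 at sequenceid=13), and the region then closed with 1740 B of unflushed edits (families b and c) still in the memstore, so those edits survive only in the WAL and have to come back through the split and replay that follows. Below is a rough client-level analogue of the "write ten cells, then force a flush" step using the public HBase client API rather than the HRegion calls the test makes directly; only the table name, row key and family name come from the log, while the connection, qualifiers and values are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testReplayEditsWrittenViaHRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Ten small cells in family 'a'; the log shows qualifier a:x0 and an
          // hfile with entries=10, so x0..x9 and the values here are only illustrative.
          for (int i = 0; i < 10; i++) {
            Put put = new Put(Bytes.toBytes("testReplayEditsWrittenViaHRegion"));
            put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("x" + i), Bytes.toBytes("value" + i));
            table.put(put);
          }
          // Force the memstore out to an HFile. Edits written after this flush
          // live only in the WAL until the next flush, which is the state the
          // WAL split/replay in the following entries depends on.
          admin.flush(tn);
        }
      }
    }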
2024-12-07T12:49:07,613 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 02acebcd31199dcbe633360df34faf13: Waiting for close lock at 1733575747611Disabling compacts and flushes for region at 1733575747611Disabling writes for close at 1733575747612 (+1 ms)Writing region close event to WAL at 1733575747612Closed at 1733575747612 2024-12-07T12:49:07,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741927_1106 (size=5869) 2024-12-07T12:49:07,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741927_1106 (size=5869) 2024-12-07T12:49:07,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741927_1106 (size=5869) 2024-12-07T12:49:07,630 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, size=5.7 K (5869bytes) 2024-12-07T12:49:07,630 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 2024-12-07T12:49:07,631 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 after 1ms 2024-12-07T12:49:07,633 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 took 3ms 2024-12-07T12:49:07,634 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 so closing down 2024-12-07T12:49:07,634 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:49:07,635 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733575747530.temp 2024-12-07T12:49:07,636 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp 2024-12-07T12:49:07,636 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:49:07,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741929_1108 (size=5223) 2024-12-07T12:49:07,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741929_1108 (size=5223) 2024-12-07T12:49:07,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741929_1108 (size=5223) 2024-12-07T12:49:07,642 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp (wrote 30 edits, skipped 0 
edits in 0 ms) 2024-12-07T12:49:07,644 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 2024-12-07T12:49:07,644 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, size=5.7 K, length=5869, corrupted=false, cancelled=false 2024-12-07T12:49:07,644 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, journal: Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, size=5.7 K (5869bytes) at 1733575747630Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 so closing down at 1733575747634 (+4 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp at 1733575747636 (+2 ms)3 split writer threads finished at 1733575747636Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733575747643 (+7 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000003-wal.1733575747530.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 at 1733575747644 (+1 ms)Processed 32 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530, size=5.7 K, length=5869, corrupted=false, cancelled=false at 1733575747644 2024-12-07T12:49:07,645 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747530 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575747530 2024-12-07T12:49:07,646 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 2024-12-07T12:49:07,646 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:07,648 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:07,659 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, exclude list is [], retry=0 2024-12-07T12:49:07,662 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:07,662 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:07,662 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:07,664 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 2024-12-07T12:49:07,665 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:40403:40403)] 2024-12-07T12:49:07,665 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 02acebcd31199dcbe633360df34faf13, NAME => 'testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:49:07,665 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:07,665 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,665 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,666 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,667 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName a 2024-12-07T12:49:07,667 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,672 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/e4f77f1cc6aa4d74bcde48326002db17 2024-12-07T12:49:07,673 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,673 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,674 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName b 2024-12-07T12:49:07,674 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,674 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,674 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,675 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName c 2024-12-07T12:49:07,675 DEBUG 
[StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:07,675 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:07,675 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,676 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,677 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,678 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 2024-12-07T12:49:07,680 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 2024-12-07T12:49:07,680 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 02acebcd31199dcbe633360df34faf13 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-07T12:49:07,692 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/935f0a9f99ca4f5cb50d92649485ef30 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733575747595/Put/seqid=0 2024-12-07T12:49:07,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741931_1110 (size=5958) 2024-12-07T12:49:07,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741931_1110 (size=5958) 2024-12-07T12:49:07,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741931_1110 (size=5958) 2024-12-07T12:49:07,698 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/935f0a9f99ca4f5cb50d92649485ef30 2024-12-07T12:49:07,715 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/ac5f80b397414bc98b3f4aee01c83904 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733575747602/Put/seqid=0 2024-12-07T12:49:07,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741932_1111 (size=5958) 2024-12-07T12:49:07,721 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741932_1111 (size=5958) 2024-12-07T12:49:07,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741932_1111 (size=5958) 2024-12-07T12:49:07,722 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/ac5f80b397414bc98b3f4aee01c83904 2024-12-07T12:49:07,726 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/935f0a9f99ca4f5cb50d92649485ef30 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/b/935f0a9f99ca4f5cb50d92649485ef30 2024-12-07T12:49:07,731 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/b/935f0a9f99ca4f5cb50d92649485ef30, entries=10, sequenceid=35, filesize=5.8 K 2024-12-07T12:49:07,732 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/ac5f80b397414bc98b3f4aee01c83904 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/c/ac5f80b397414bc98b3f4aee01c83904 2024-12-07T12:49:07,736 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/c/ac5f80b397414bc98b3f4aee01c83904, entries=10, sequenceid=35, filesize=5.8 K 2024-12-07T12:49:07,736 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 02acebcd31199dcbe633360df34faf13 in 56ms, sequenceid=35, compaction requested=false; wal=null 2024-12-07T12:49:07,737 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000035 2024-12-07T12:49:07,738 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,738 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,739 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
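The Committing/Added entries just above show the flush commit pattern this replay relies on: each flushed HFile is first written under the region's .tmp/<family>/ directory and then published into <family>/ with a single rename. A minimal sketch of that pattern, assuming only the plain Hadoop FileSystem API; the class name, the commitFlushedFile helper and the paths in main() are hypothetical, not taken from the HBase source or from this run.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the ".tmp then rename" flush commit seen in the log above.
    // Only the FileSystem calls are standard Hadoop API; everything else is illustrative.
    public class TmpThenRenameCommit {
      static Path commitFlushedFile(FileSystem fs, Path regionDir, String family, String fileName)
          throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        Path familyDir = new Path(regionDir, family);
        if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
          throw new IOException("Could not create " + familyDir);
        }
        Path committed = new Path(familyDir, fileName);
        // The rename publishes the finished HFile in one step; readers never see a partial file.
        if (!fs.rename(tmpFile, committed)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + committed);
        }
        return committed;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path regionDir = new Path("/hbase/data/default/someTable/someRegion"); // placeholder
        System.out.println(commitFlushedFile(fs, regionDir, "b", "935f0a9f99ca4f5cb50d92649485ef30"));
      }
    }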
2024-12-07T12:49:07,740 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:07,742 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-07T12:49:07,742 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 02acebcd31199dcbe633360df34faf13; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63034509, jitterRate=-0.06071262061595917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:07,743 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 02acebcd31199dcbe633360df34faf13: Writing region info on filesystem at 1733575747665Initializing all the Stores at 1733575747666 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747666Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747666Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575747666Obtaining lock to block concurrent updates at 1733575747680 (+14 ms)Preparing flush snapshotting stores in 02acebcd31199dcbe633360df34faf13 at 1733575747680Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733575747680Flushing stores of testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 
at 1733575747680Flushing 02acebcd31199dcbe633360df34faf13/b: creating writer at 1733575747680Flushing 02acebcd31199dcbe633360df34faf13/b: appending metadata at 1733575747692 (+12 ms)Flushing 02acebcd31199dcbe633360df34faf13/b: closing flushed file at 1733575747692Flushing 02acebcd31199dcbe633360df34faf13/c: creating writer at 1733575747702 (+10 ms)Flushing 02acebcd31199dcbe633360df34faf13/c: appending metadata at 1733575747715 (+13 ms)Flushing 02acebcd31199dcbe633360df34faf13/c: closing flushed file at 1733575747715Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e248f6a: reopening flushed file at 1733575747726 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60ac0436: reopening flushed file at 1733575747731 (+5 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 02acebcd31199dcbe633360df34faf13 in 56ms, sequenceid=35, compaction requested=false; wal=null at 1733575747736 (+5 ms)Cleaning up temporary data from old regions at 1733575747738 (+2 ms)Region opened successfully at 1733575747743 (+5 ms) 2024-12-07T12:49:07,799 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, size=0 (0bytes) 2024-12-07T12:49:07,799 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 might be still open, length is 0 2024-12-07T12:49:07,799 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 2024-12-07T12:49:07,800 WARN [IPC Server handler 1 on default port 43841 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-12-07T12:49:07,800 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 after 1ms 2024-12-07T12:49:09,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:58570 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:36341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58570 dst: /127.0.0.1:36341 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36341 remote=/127.0.0.1:58570]. Total timeout mills is 60000, 58543 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:09,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:44366 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44366 dst: /127.0.0.1:35169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:09,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_465337669_22 at /127.0.0.1:36074 [Receiving block BP-1122527667-172.17.0.2-1733575711591:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36074 dst: /127.0.0.1:46077 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:49:09,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741930_1112 (size=5215) 2024-12-07T12:49:09,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741930_1112 (size=5215) 2024-12-07T12:49:11,801 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 after 4002ms 2024-12-07T12:49:11,809 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 took 4010ms 2024-12-07T12:49:11,812 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648; continuing. 
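The RecoverLeaseFSUtils entries above (failed attempt=0 after 1ms, recovered at attempt=1 after 4002ms) reflect a retry loop around HDFS lease recovery: the still-open WAL reports length 0 until the NameNode releases the previous writer's lease, and only then can the splitter read it to its true end. A minimal sketch of that loop, assuming only DistributedFileSystem.recoverLease; the 4-second back-off and attempt limit are arbitrary example values, not the values HBase uses.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch of lease recovery before splitting a possibly-still-open WAL.
    // recoverLease() returns true once the file is closed and its length is final.
    public class RecoverLeaseSketch {
      static boolean recoverLeaseWithRetry(FileSystem fs, Path wal, int maxAttempts)
          throws IOException, InterruptedException {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          if (dfs.recoverLease(wal)) {
            return true;                // lease released; safe to read the WAL to its true end
          }
          Thread.sleep(4000L);          // example back-off between attempts
        }
        return false;
      }
    }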
2024-12-07T12:49:11,812 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 so closing down 2024-12-07T12:49:11,812 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-07T12:49:11,814 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733575747648.temp 2024-12-07T12:49:11,815 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp 2024-12-07T12:49:11,816 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-07T12:49:11,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741933_1113 (size=5223) 2024-12-07T12:49:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741933_1113 (size=5223) 2024-12-07T12:49:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741933_1113 (size=5223) 2024-12-07T12:49:11,824 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-07T12:49:11,825 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 2024-12-07T12:49:11,825 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, size=0, length=0, corrupted=false, cancelled=false 2024-12-07T12:49:11,826 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, journal: Splitting hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, size=0 (0bytes) at 1733575747799Finishing writing output for hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 so closing down at 1733575751812 (+4013 ms)Creating recovered edits writer path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp at 1733575751815 (+3 ms)3 split writer threads finished at 1733575751816 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733575751824 (+8 ms)Rename recovered edits hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000037-wal.1733575747648.temp to hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 at 1733575751825 (+1 ms)Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648, size=0, length=0, corrupted=false, cancelled=false at 1733575751825 2024-12-07T12:49:11,827 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 to hdfs://localhost:43841/hbase/oldWALs/wal.1733575747648 2024-12-07T12:49:11,828 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 2024-12-07T12:49:11,828 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-07T12:49:11,830 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:43841/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477, archiveDir=hdfs://localhost:43841/hbase/oldWALs, maxLogs=32 2024-12-07T12:49:11,842 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575751830, exclude list is [], retry=0 2024-12-07T12:49:11,844 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46077,DS-c130b5b7-9f3f-4346-b574-e4858082ed6f,DISK] 2024-12-07T12:49:11,845 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35169,DS-1d2a644a-cb4c-4fc7-9ca3-c96c11d6f674,DISK] 2024-12-07T12:49:11,845 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36341,DS-354943d8-05bb-4e6a-aa4d-8ce59e50a242,DISK] 2024-12-07T12:49:11,846 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575751830 2024-12-07T12:49:11,846 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40403:40403),(127.0.0.1/127.0.0.1:42693:42693),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-07T12:49:11,847 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:49:11,848 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,849 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName a 2024-12-07T12:49:11,849 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:11,853 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/e4f77f1cc6aa4d74bcde48326002db17 2024-12-07T12:49:11,854 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:11,854 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,855 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName b 2024-12-07T12:49:11,855 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:11,861 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/b/935f0a9f99ca4f5cb50d92649485ef30 2024-12-07T12:49:11,861 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:11,861 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,862 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02acebcd31199dcbe633360df34faf13 columnFamilyName c 2024-12-07T12:49:11,862 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:49:11,867 DEBUG [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/c/ac5f80b397414bc98b3f4aee01c83904 2024-12-07T12:49:11,868 INFO [StoreOpener-02acebcd31199dcbe633360df34faf13-1 {}] regionserver.HStore(327): Store=02acebcd31199dcbe633360df34faf13/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:49:11,868 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,869 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,871 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,871 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 2024-12-07T12:49:11,877 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 2024-12-07T12:49:11,877 INFO 
[Time-limited test {}] regionserver.HRegion(2902): Flushing 02acebcd31199dcbe633360df34faf13 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-07T12:49:11,891 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/a485d085127c44c2a7674db00384f53f is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733575747749/Put/seqid=0 2024-12-07T12:49:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741935_1115 (size=5958) 2024-12-07T12:49:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741935_1115 (size=5958) 2024-12-07T12:49:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741935_1115 (size=5958) 2024-12-07T12:49:11,897 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/a485d085127c44c2a7674db00384f53f 2024-12-07T12:49:11,915 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/263a14412b2c42f4aad3325b555ca139 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733575747755/Put/seqid=0 2024-12-07T12:49:11,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741936_1116 (size=5958) 2024-12-07T12:49:11,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741936_1116 (size=5958) 2024-12-07T12:49:11,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741936_1116 (size=5958) 2024-12-07T12:49:11,921 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/263a14412b2c42f4aad3325b555ca139 2024-12-07T12:49:11,938 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/2d7c723d067343889945e654179b92d1 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733575747761/Put/seqid=0 2024-12-07T12:49:11,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741937_1117 (size=5958) 2024-12-07T12:49:11,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741937_1117 (size=5958) 2024-12-07T12:49:11,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741937_1117 (size=5958) 2024-12-07T12:49:11,946 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), 
to=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/2d7c723d067343889945e654179b92d1 2024-12-07T12:49:11,950 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/a/a485d085127c44c2a7674db00384f53f as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/a485d085127c44c2a7674db00384f53f 2024-12-07T12:49:11,955 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/a/a485d085127c44c2a7674db00384f53f, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T12:49:11,956 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/b/263a14412b2c42f4aad3325b555ca139 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/b/263a14412b2c42f4aad3325b555ca139 2024-12-07T12:49:11,960 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/b/263a14412b2c42f4aad3325b555ca139, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T12:49:11,961 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/.tmp/c/2d7c723d067343889945e654179b92d1 as hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/c/2d7c723d067343889945e654179b92d1 2024-12-07T12:49:11,965 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/c/2d7c723d067343889945e654179b92d1, entries=10, sequenceid=66, filesize=5.8 K 2024-12-07T12:49:11,965 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 02acebcd31199dcbe633360df34faf13 in 88ms, sequenceid=66, compaction requested=false; wal=null 2024-12-07T12:49:11,966 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/0000000000000000066 2024-12-07T12:49:11,967 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,967 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,967 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
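The two replay passes in this test report "Applied 20, skipped 10" and then "Applied 30, skipped 0": edits read from recovered.edits are re-applied to the memstore only when their sequence id is higher than what is already durable in the store's HFiles. A minimal sketch of that filter; WalEdit, Cell and the per-family map are hypothetical stand-ins for the internal HBase types, kept only to show the sequence-id comparison.

    import java.util.List;
    import java.util.Map;
    import java.util.function.Consumer;

    // Sketch of the sequence-id filter used when replaying recovered.edits.
    // The types here are illustrative stand-ins, not HBase classes.
    class RecoveredEditsReplaySketch {
      static class Cell { String family; }
      static class WalEdit { long seqId; List<Cell> cells; }

      static void replay(List<WalEdit> edits, Map<String, Long> maxFlushedSeqIdPerFamily,
          Consumer<WalEdit> applyToMemstore) {
        long applied = 0, skipped = 0, maxSeqIdInLog = -1;
        for (WalEdit edit : edits) {
          maxSeqIdInLog = Math.max(maxSeqIdInLog, edit.seqId);
          // Replay only if some cell targets a family whose flushed data does not
          // already cover this sequence id; otherwise the edit is safely skipped.
          boolean needed = edit.cells.stream().anyMatch(
              c -> edit.seqId > maxFlushedSeqIdPerFamily.getOrDefault(c.family, -1L));
          if (needed) { applyToMemstore.accept(edit); applied++; } else { skipped++; }
        }
        System.out.println("Applied " + applied + ", skipped " + skipped
            + ", maxSequenceIdInLog=" + maxSeqIdInLog);
      }
    }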
2024-12-07T12:49:11,969 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 02acebcd31199dcbe633360df34faf13 2024-12-07T12:49:11,970 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/hbase/data/default/testReplayEditsWrittenViaHRegion/02acebcd31199dcbe633360df34faf13/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-07T12:49:11,971 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 02acebcd31199dcbe633360df34faf13; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59447912, jitterRate=-0.11415708065032959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T12:49:11,971 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 02acebcd31199dcbe633360df34faf13: Writing region info on filesystem at 1733575751847Initializing all the Stores at 1733575751848 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575751848Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575751848Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733575751848Obtaining lock to block concurrent updates at 1733575751877 (+29 ms)Preparing flush snapshotting stores in 02acebcd31199dcbe633360df34faf13 at 1733575751877Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733575751877Flushing stores of testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 
at 1733575751877Flushing 02acebcd31199dcbe633360df34faf13/a: creating writer at 1733575751877Flushing 02acebcd31199dcbe633360df34faf13/a: appending metadata at 1733575751890 (+13 ms)Flushing 02acebcd31199dcbe633360df34faf13/a: closing flushed file at 1733575751890Flushing 02acebcd31199dcbe633360df34faf13/b: creating writer at 1733575751900 (+10 ms)Flushing 02acebcd31199dcbe633360df34faf13/b: appending metadata at 1733575751914 (+14 ms)Flushing 02acebcd31199dcbe633360df34faf13/b: closing flushed file at 1733575751914Flushing 02acebcd31199dcbe633360df34faf13/c: creating writer at 1733575751926 (+12 ms)Flushing 02acebcd31199dcbe633360df34faf13/c: appending metadata at 1733575751937 (+11 ms)Flushing 02acebcd31199dcbe633360df34faf13/c: closing flushed file at 1733575751938 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6af46bd3: reopening flushed file at 1733575751950 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b974e6: reopening flushed file at 1733575751955 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28175d35: reopening flushed file at 1733575751960 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 02acebcd31199dcbe633360df34faf13 in 88ms, sequenceid=66, compaction requested=false; wal=null at 1733575751965 (+5 ms)Cleaning up temporary data from old regions at 1733575751967 (+2 ms)Region opened successfully at 1733575751971 (+4 ms) 2024-12-07T12:49:11,983 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 02acebcd31199dcbe633360df34faf13, disabling compactions & flushes 2024-12-07T12:49:11,983 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:11,983 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:11,983 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. after waiting 0 ms 2024-12-07T12:49:11,983 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 2024-12-07T12:49:11,984 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733575747478.02acebcd31199dcbe633360df34faf13. 
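Both split passes above renamed a temporary writer file into a zero-padded final name (0000000000000000003-wal.1733575747530.temp to 0000000000000000035, then 0000000000000000037-wal.1733575747648.temp to 0000000000000000066). A minimal sketch of that naming, inferred from the file names visible in this log rather than taken from the HBase source; the helper methods are hypothetical.

    // Naming of recovered.edits files as seen in the rename entries of this log:
    // temp files carry the first sequence id plus the WAL name, the final file the
    // highest sequence id written. The 19-digit zero padding matches the log output.
    class RecoveredEditsNamingSketch {
      static String tempName(long firstSeqId, String walName) {
        return String.format("%019d-%s.temp", firstSeqId, walName);
      }
      static String finalName(long maxSeqId) {
        return String.format("%019d", maxSeqId);
      }
      public static void main(String[] args) {
        System.out.println(tempName(37L, "wal.1733575747648")); // 0000000000000000037-wal.1733575747648.temp
        System.out.println(finalName(66L));                     // 0000000000000000066
      }
    }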
2024-12-07T12:49:11,984 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 02acebcd31199dcbe633360df34faf13: Waiting for close lock at 1733575751983Disabling compacts and flushes for region at 1733575751983Disabling writes for close at 1733575751983Writing region close event to WAL at 1733575751984 (+1 ms)Closed at 1733575751984 2024-12-07T12:49:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741934_1114 (size=93) 2024-12-07T12:49:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741934_1114 (size=93) 2024-12-07T12:49:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741934_1114 (size=93) 2024-12-07T12:49:11,989 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-07T12:49:11,989 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733575751830) 2024-12-07T12:49:12,002 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplay#testReplayEditsWrittenViaHRegion Thread=447 (was 443) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:43841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1237915351_22 at /127.0.0.1:41250 [Waiting for operation #20] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1237915351_22 at /127.0.0.1:45376 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1237915351_22 at /127.0.0.1:43416 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (473829079) connection to localhost/127.0.0.1:43841 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=1412 (was 1346) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=160 (was 173), ProcessCount=11 (was 11), AvailableMemoryMB=5581 (was 5590) 2024-12-07T12:49:12,003 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1412 is superior to 1024 2024-12-07T12:49:12,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:49:12,003 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
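The ResourceChecker summary above compares thread and file-descriptor counts taken before and after the test ("Thread=447 (was 443)", "OpenFileDescriptor=1412 is superior to 1024"). A rough sketch of how such a before/after snapshot can be taken on a JVM; this is not the hbase ResourceChecker implementation, only the standard management beans it could rely on, and the class name is made up.

    import java.lang.management.ManagementFactory;
    import com.sun.management.UnixOperatingSystemMXBean;

    // Sketch of a before/after resource snapshot similar in spirit to ResourceChecker.
    public class ResourceSnapshotSketch {
      static long openFileDescriptors() {
        var os = ManagementFactory.getOperatingSystemMXBean();
        return (os instanceof UnixOperatingSystemMXBean unix)
            ? unix.getOpenFileDescriptorCount()
            : -1L;                                   // unavailable on non-Unix platforms
      }

      public static void main(String[] args) {
        int threadsBefore = Thread.activeCount();
        long fdsBefore = openFileDescriptors();
        // ... run the test body here ...
        System.out.printf("Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
            Thread.activeCount(), threadsBefore, openFileDescriptors(), fdsBefore);
      }
    }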
2024-12-07T12:49:12,003 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:49:12,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,004 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-07T12:49:12,004 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:49:12,004 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=524162917, stopped=false 2024-12-07T12:49:12,004 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2d46b487c067,37233,1733575714217 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:49:12,006 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:49:12,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:49:12,006 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T12:49:12,006 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:49:12,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,007 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2d46b487c067,39787,1733575714772' ***** 2024-12-07T12:49:12,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:49:12,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:49:12,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:49:12,007 INFO [Time-limited test {}] 
regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:49:12,007 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2d46b487c067,44445,1733575714899' ***** 2024-12-07T12:49:12,007 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:49:12,007 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:49:12,007 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(3091): Received CLOSE for 05976781667afccaf4cfd2929edf2476 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(959): stopping server 2d46b487c067,39787,1733575714772 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2d46b487c067:39787. 2024-12-07T12:49:12,008 DEBUG [RS:0;2d46b487c067:39787 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(959): stopping server 2d46b487c067,44445,1733575714899 2024-12-07T12:49:12,008 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:49:12,008 DEBUG [RS:0;2d46b487c067:39787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,008 INFO 
[RS:2;2d46b487c067:44445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:49:12,008 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(976): stopping server 2d46b487c067,39787,1733575714772; all regions closed. 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2d46b487c067:44445. 2024-12-07T12:49:12,008 DEBUG [RS:2;2d46b487c067:44445 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:49:12,008 DEBUG [RS:2;2d46b487c067:44445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:49:12,008 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:49:12,008 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 05976781667afccaf4cfd2929edf2476, disabling compactions & flushes 2024-12-07T12:49:12,008 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:49:12,008 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
after waiting 0 ms 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:49:12,009 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T12:49:12,009 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1325): Online Regions={05976781667afccaf4cfd2929edf2476=testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T12:49:12,009 DEBUG [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1351): Waiting on 05976781667afccaf4cfd2929edf2476, 1588230740 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:49:12,009 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:49:12,009 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:49:12,009 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-12-07T12:49:12,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741833_1009 (size=2598) 2024-12-07T12:49:12,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741833_1009 (size=2598) 2024-12-07T12:49:12,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741833_1009 (size=2598) 2024-12-07T12:49:12,014 DEBUG [RS:0;2d46b487c067:39787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs 2024-12-07T12:49:12,014 INFO [RS:0;2d46b487c067:39787 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2d46b487c067%2C39787%2C1733575714772:(num 1733575716065) 2024-12-07T12:49:12,014 DEBUG [RS:0;2d46b487c067:39787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,014 INFO [RS:0;2d46b487c067:39787 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:49:12,014 INFO [RS:0;2d46b487c067:39787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] hbase.ChoreService(370): Chore service for: regionserver/2d46b487c067:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:49:12,015 INFO [RS:0;2d46b487c067:39787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39787 2024-12-07T12:49:12,016 INFO [regionserver/2d46b487c067:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:49:12,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2d46b487c067,39787,1733575714772 2024-12-07T12:49:12,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:49:12,017 INFO [RS:0;2d46b487c067:39787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:49:12,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2d46b487c067,39787,1733575714772] 2024-12-07T12:49:12,019 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2d46b487c067,39787,1733575714772 already deleted, retry=false 2024-12-07T12:49:12,019 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2d46b487c067,39787,1733575714772 expired; onlineServers=1 2024-12-07T12:49:12,021 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/default/testReplayEditsAfterRegionMovedWithMultiCF/05976781667afccaf4cfd2929edf2476/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-07T12:49:12,021 INFO [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 2024-12-07T12:49:12,022 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 05976781667afccaf4cfd2929edf2476: Waiting for close lock at 1733575752008Running coprocessor pre-close hooks at 1733575752008Disabling compacts and flushes for region at 1733575752008Disabling writes for close at 1733575752009 (+1 ms)Writing region close event to WAL at 1733575752014 (+5 ms)Running coprocessor post-close hooks at 1733575752021 (+7 ms)Closed at 1733575752021 2024-12-07T12:49:12,022 DEBUG [RS_CLOSE_REGION-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476. 
2024-12-07T12:49:12,032 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/info/066fe3a30beb4d2f9ba63a909676ddb3 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733575728737.05976781667afccaf4cfd2929edf2476./info:regioninfo/1733575731799/Put/seqid=0 2024-12-07T12:49:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741938_1118 (size=8243) 2024-12-07T12:49:12,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741938_1118 (size=8243) 2024-12-07T12:49:12,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741938_1118 (size=8243) 2024-12-07T12:49:12,039 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/info/066fe3a30beb4d2f9ba63a909676ddb3 2024-12-07T12:49:12,054 INFO [regionserver/2d46b487c067:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:49:12,054 INFO [regionserver/2d46b487c067:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:49:12,067 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/ns/fc385e52bc6842c2ba554815f994f580 is 43, key is default/ns:d/1733575716627/Put/seqid=0 2024-12-07T12:49:12,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741939_1119 (size=5153) 2024-12-07T12:49:12,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741939_1119 (size=5153) 2024-12-07T12:49:12,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741939_1119 (size=5153) 2024-12-07T12:49:12,073 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/ns/fc385e52bc6842c2ba554815f994f580 2024-12-07T12:49:12,090 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/table/b4452480ce46412885f1ee09d5d86bb9 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733575729165/Put/seqid=0 2024-12-07T12:49:12,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741940_1120 (size=5431) 2024-12-07T12:49:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35169 is added to blk_1073741940_1120 (size=5431) 2024-12-07T12:49:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741940_1120 (size=5431) 2024-12-07T12:49:12,097 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/table/b4452480ce46412885f1ee09d5d86bb9 2024-12-07T12:49:12,104 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/info/066fe3a30beb4d2f9ba63a909676ddb3 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/info/066fe3a30beb4d2f9ba63a909676ddb3 2024-12-07T12:49:12,110 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/info/066fe3a30beb4d2f9ba63a909676ddb3, entries=18, sequenceid=21, filesize=8.0 K 2024-12-07T12:49:12,111 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/ns/fc385e52bc6842c2ba554815f994f580 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/ns/fc385e52bc6842c2ba554815f994f580 2024-12-07T12:49:12,116 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/ns/fc385e52bc6842c2ba554815f994f580, entries=2, sequenceid=21, filesize=5.0 K 2024-12-07T12:49:12,117 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/.tmp/table/b4452480ce46412885f1ee09d5d86bb9 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/table/b4452480ce46412885f1ee09d5d86bb9 2024-12-07T12:49:12,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,119 INFO [RS:0;2d46b487c067:39787 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:49:12,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39787-0x100b4b97f370001, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,119 INFO [RS:0;2d46b487c067:39787 {}] regionserver.HRegionServer(1031): Exiting; stopping=2d46b487c067,39787,1733575714772; zookeeper connection closed. 
2024-12-07T12:49:12,119 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e6ca128 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e6ca128 2024-12-07T12:49:12,122 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/table/b4452480ce46412885f1ee09d5d86bb9, entries=2, sequenceid=21, filesize=5.3 K 2024-12-07T12:49:12,123 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=21, compaction requested=false 2024-12-07T12:49:12,127 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-07T12:49:12,127 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:49:12,127 INFO [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:49:12,127 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733575752009Running coprocessor pre-close hooks at 1733575752009Disabling compacts and flushes for region at 1733575752009Disabling writes for close at 1733575752009Obtaining lock to block concurrent updates at 1733575752009Preparing flush snapshotting stores in 1588230740 at 1733575752009Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1733575752010 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733575752011 (+1 ms)Flushing 1588230740/info: creating writer at 1733575752011Flushing 1588230740/info: appending metadata at 1733575752031 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733575752032 (+1 ms)Flushing 1588230740/ns: creating writer at 1733575752046 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733575752066 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733575752066Flushing 1588230740/table: creating writer at 1733575752078 (+12 ms)Flushing 1588230740/table: appending metadata at 1733575752089 (+11 ms)Flushing 1588230740/table: closing flushed file at 1733575752090 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40fb2d1d: reopening flushed file at 1733575752103 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@657459d3: reopening flushed file at 1733575752110 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b6c3d2: reopening flushed file at 1733575752116 (+6 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=21, compaction requested=false at 1733575752123 (+7 ms)Writing region close event to WAL at 1733575752124 (+1 ms)Running coprocessor post-close hooks at 
1733575752127 (+3 ms)Closed at 1733575752127 2024-12-07T12:49:12,127 DEBUG [RS_CLOSE_META-regionserver/2d46b487c067:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:49:12,209 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(976): stopping server 2d46b487c067,44445,1733575714899; all regions closed. 2024-12-07T12:49:12,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741836_1012 (size=8899) 2024-12-07T12:49:12,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741836_1012 (size=8899) 2024-12-07T12:49:12,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741836_1012 (size=8899) 2024-12-07T12:49:12,218 DEBUG [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs 2024-12-07T12:49:12,218 INFO [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2d46b487c067%2C44445%2C1733575714899.meta:.meta(num 1733575716480) 2024-12-07T12:49:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741835_1011 (size=928) 2024-12-07T12:49:12,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741835_1011 (size=928) 2024-12-07T12:49:12,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741835_1011 (size=928) 2024-12-07T12:49:12,225 DEBUG [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/oldWALs 2024-12-07T12:49:12,225 INFO [RS:2;2d46b487c067:44445 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2d46b487c067%2C44445%2C1733575714899:(num 1733575716065) 2024-12-07T12:49:12,225 DEBUG [RS:2;2d46b487c067:44445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:49:12,225 INFO [RS:2;2d46b487c067:44445 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:49:12,225 INFO [RS:2;2d46b487c067:44445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:49:12,225 INFO [RS:2;2d46b487c067:44445 {}] hbase.ChoreService(370): Chore service for: regionserver/2d46b487c067:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T12:49:12,226 INFO [RS:2;2d46b487c067:44445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:49:12,226 INFO [regionserver/2d46b487c067:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:49:12,226 INFO [RS:2;2d46b487c067:44445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44445 2024-12-07T12:49:12,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2d46b487c067,44445,1733575714899 2024-12-07T12:49:12,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:49:12,228 INFO [RS:2;2d46b487c067:44445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:49:12,228 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2d46b487c067,44445,1733575714899] 2024-12-07T12:49:12,229 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2d46b487c067,44445,1733575714899 already deleted, retry=false 2024-12-07T12:49:12,229 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2d46b487c067,44445,1733575714899 expired; onlineServers=0 2024-12-07T12:49:12,229 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2d46b487c067,37233,1733575714217' ***** 2024-12-07T12:49:12,229 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:49:12,229 INFO [M:0;2d46b487c067:37233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:49:12,230 INFO [M:0;2d46b487c067:37233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:49:12,230 DEBUG [M:0;2d46b487c067:37233 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:49:12,230 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:49:12,230 DEBUG [M:0;2d46b487c067:37233 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:49:12,230 DEBUG [master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.small.0-1733575715747 {}] cleaner.HFileCleaner(306): Exit Thread[master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.small.0-1733575715747,5,FailOnTimeoutGroup] 2024-12-07T12:49:12,230 DEBUG [master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.large.0-1733575715744 {}] cleaner.HFileCleaner(306): Exit Thread[master/2d46b487c067:0:becomeActiveMaster-HFileCleaner.large.0-1733575715744,5,FailOnTimeoutGroup] 2024-12-07T12:49:12,230 INFO [M:0;2d46b487c067:37233 {}] hbase.ChoreService(370): Chore service for: master/2d46b487c067:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:49:12,230 INFO [M:0;2d46b487c067:37233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:49:12,230 DEBUG [M:0;2d46b487c067:37233 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:49:12,230 INFO [M:0;2d46b487c067:37233 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:49:12,230 INFO [M:0;2d46b487c067:37233 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:49:12,231 INFO [M:0;2d46b487c067:37233 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:49:12,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:49:12,231 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-07T12:49:12,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:49:12,231 DEBUG [M:0;2d46b487c067:37233 {}] zookeeper.ZKUtil(347): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:49:12,231 WARN [M:0;2d46b487c067:37233 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:49:12,232 INFO [M:0;2d46b487c067:37233 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/.lastflushedseqids 2024-12-07T12:49:12,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741941_1121 (size=138) 2024-12-07T12:49:12,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741941_1121 (size=138) 2024-12-07T12:49:12,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741941_1121 (size=138) 2024-12-07T12:49:12,245 INFO [M:0;2d46b487c067:37233 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:49:12,245 INFO [M:0;2d46b487c067:37233 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:49:12,245 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:49:12,245 INFO [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:49:12,245 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:49:12,245 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:49:12,245 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:49:12,245 INFO [M:0;2d46b487c067:37233 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.33 KB heapSize=83.72 KB 2024-12-07T12:49:12,259 DEBUG [M:0;2d46b487c067:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7e4f365edf14ce4a9dd98f88931d5a1 is 82, key is hbase:meta,,1/info:regioninfo/1733575716564/Put/seqid=0 2024-12-07T12:49:12,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741942_1122 (size=5672) 2024-12-07T12:49:12,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741942_1122 (size=5672) 2024-12-07T12:49:12,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741942_1122 (size=5672) 2024-12-07T12:49:12,265 INFO [M:0;2d46b487c067:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7e4f365edf14ce4a9dd98f88931d5a1 2024-12-07T12:49:12,283 DEBUG [M:0;2d46b487c067:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98f49c8efd824187bd4c72e2b358144e is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733575729171/Put/seqid=0 2024-12-07T12:49:12,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741943_1123 (size=7754) 2024-12-07T12:49:12,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741943_1123 (size=7754) 2024-12-07T12:49:12,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741943_1123 (size=7754) 2024-12-07T12:49:12,291 INFO [M:0;2d46b487c067:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98f49c8efd824187bd4c72e2b358144e 2024-12-07T12:49:12,295 INFO [M:0;2d46b487c067:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 98f49c8efd824187bd4c72e2b358144e 2024-12-07T12:49:12,307 DEBUG [M:0;2d46b487c067:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cf05449caae42dc90ecd3be11bb5dd6 is 69, key is 2d46b487c067,39787,1733575714772/rs:state/1733575715841/Put/seqid=0 2024-12-07T12:49:12,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741944_1124 (size=5440) 2024-12-07T12:49:12,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35169 is added to blk_1073741944_1124 (size=5440) 2024-12-07T12:49:12,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741944_1124 (size=5440) 2024-12-07T12:49:12,313 INFO [M:0;2d46b487c067:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cf05449caae42dc90ecd3be11bb5dd6 2024-12-07T12:49:12,317 INFO [M:0;2d46b487c067:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9cf05449caae42dc90ecd3be11bb5dd6 2024-12-07T12:49:12,318 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f7e4f365edf14ce4a9dd98f88931d5a1 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f7e4f365edf14ce4a9dd98f88931d5a1 2024-12-07T12:49:12,323 INFO [M:0;2d46b487c067:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f7e4f365edf14ce4a9dd98f88931d5a1, entries=8, sequenceid=168, filesize=5.5 K 2024-12-07T12:49:12,324 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98f49c8efd824187bd4c72e2b358144e as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98f49c8efd824187bd4c72e2b358144e 2024-12-07T12:49:12,328 INFO [M:0;2d46b487c067:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 98f49c8efd824187bd4c72e2b358144e 2024-12-07T12:49:12,328 INFO [M:0;2d46b487c067:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98f49c8efd824187bd4c72e2b358144e, entries=17, sequenceid=168, filesize=7.6 K 2024-12-07T12:49:12,328 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cf05449caae42dc90ecd3be11bb5dd6 as hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cf05449caae42dc90ecd3be11bb5dd6 2024-12-07T12:49:12,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,329 INFO [RS:2;2d46b487c067:44445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:49:12,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44445-0x100b4b97f370003, 
quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,329 INFO [RS:2;2d46b487c067:44445 {}] regionserver.HRegionServer(1031): Exiting; stopping=2d46b487c067,44445,1733575714899; zookeeper connection closed. 2024-12-07T12:49:12,329 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18de2201 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18de2201 2024-12-07T12:49:12,329 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T12:49:12,333 INFO [M:0;2d46b487c067:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9cf05449caae42dc90ecd3be11bb5dd6 2024-12-07T12:49:12,333 INFO [M:0;2d46b487c067:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43841/user/jenkins/test-data/a9b254a2-f0d8-e49d-4c94-71f7964bfc23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cf05449caae42dc90ecd3be11bb5dd6, entries=3, sequenceid=168, filesize=5.3 K 2024-12-07T12:49:12,334 INFO [M:0;2d46b487c067:37233 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 89ms, sequenceid=168, compaction requested=false 2024-12-07T12:49:12,335 INFO [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:49:12,335 DEBUG [M:0;2d46b487c067:37233 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733575752245Disabling compacts and flushes for region at 1733575752245Disabling writes for close at 1733575752245Obtaining lock to block concurrent updates at 1733575752245Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733575752245Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69972, getHeapSize=85664, getOffHeapSize=0, getCellsCount=195 at 1733575752245Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733575752246 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733575752246Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733575752259 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733575752259Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733575752269 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733575752283 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733575752283Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733575752295 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733575752306 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733575752306Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c34f218: reopening flushed file at 1733575752317 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6199b9f7: reopening flushed file at 1733575752323 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@133aca0: reopening flushed file at 1733575752328 (+5 ms)Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 89ms, sequenceid=168, compaction requested=false at 1733575752334 (+6 ms)Writing region close event to WAL at 1733575752335 (+1 ms)Closed at 1733575752335 2024-12-07T12:49:12,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741830_1006 (size=81419) 2024-12-07T12:49:12,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741830_1006 (size=81419) 2024-12-07T12:49:12,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36341 is added to blk_1073741830_1006 (size=81419) 2024-12-07T12:49:12,337 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:49:12,338 INFO [M:0;2d46b487c067:37233 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T12:49:12,338 INFO [M:0;2d46b487c067:37233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37233 2024-12-07T12:49:12,338 INFO [M:0;2d46b487c067:37233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:49:12,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,439 INFO [M:0;2d46b487c067:37233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:49:12,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x100b4b97f370000, quorum=127.0.0.1:62259, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:49:12,450 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732475 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732475 (inode 16655) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733575732180/wal.1733575732475 (inode 16655) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-07T12:49:12,452 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733575724317/wal.1733575724406 with renewLeaseKey: DEFAULT_16586
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T12:49:12,452 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733575747477/wal.1733575747648 with renewLeaseKey: DEFAULT_16767
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T12:49:12,454 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575741858 with renewLeaseKey: DEFAULT_16678
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575741858 (inode 16678) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733575732612/wal.1733575741858 (inode 16678) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-07T12:49:12,455 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733575717768/wal.1733575717843 with renewLeaseKey: DEFAULT_16506
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T12:49:12,457 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742511 with renewLeaseKey: DEFAULT_16704
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742511 (inode 16704) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733575742058/wal.1733575742511 (inode 16704) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-07T12:49:12,458 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733575742664/wal.1733575742723 with renewLeaseKey: DEFAULT_16726
java.io.IOException: stream already broken
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T12:49:12,460 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal.1733575717633 with renewLeaseKey: DEFAULT_16485
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal.1733575717633 (inode 16485) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733575717354/wal.1733575717633 (inode 16485) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-07T12:49:12,462 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal.1733575717138 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal.1733575717138 (inode 16462) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733575716923/wal.1733575717138 (inode 16462) Holder DFSClient_NONMAPREDUCE_465337669_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 25 more
2024-12-07T12:49:12,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c5b5cda{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T12:49:12,468 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32eb964{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:49:12,468 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:49:12,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@582ba343{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:49:12,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bd4c69c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,STOPPED}
2024-12-07T12:49:12,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T12:49:12,471 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T12:49:12,471 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1122527667-172.17.0.2-1733575711591 (Datanode Uuid 2e57ee44-cfff-4171-9414-358776e55f15) service to localhost/127.0.0.1:43841
2024-12-07T12:49:12,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T12:49:12,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data5/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:49:12,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data6/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:49:12,493 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T12:49:12,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70b25848{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T12:49:12,495 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a8a49ff{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:49:12,495 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:49:12,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edd15fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:49:12,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b152b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,STOPPED}
2024-12-07T12:49:12,496 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T12:49:12,496 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T12:49:12,496 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T12:49:12,496 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1122527667-172.17.0.2-1733575711591 (Datanode Uuid c9941af7-8f70-4ab7-a32f-11dc9cb8dc32) service to localhost/127.0.0.1:43841
2024-12-07T12:49:12,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data3/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:49:12,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data4/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:49:12,497 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T12:49:12,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17be2a4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T12:49:12,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ecf726e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:49:12,502 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:49:12,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19895485{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:49:12,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50901468{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,STOPPED}
2024-12-07T12:49:12,504 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T12:49:12,504 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T12:49:12,504 WARN [BP-1122527667-172.17.0.2-1733575711591 heartbeating to localhost/127.0.0.1:43841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1122527667-172.17.0.2-1733575711591 (Datanode Uuid 003b6861-c907-45e5-84dc-58ef67fae0c7) service to localhost/127.0.0.1:43841 2024-12-07T12:49:12,504 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:49:12,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data1/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:49:12,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/cluster_2a51b0cd-b7dd-ff4c-e73b-5e03de1f3f96/data/data2/current/BP-1122527667-172.17.0.2-1733575711591 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:49:12,505 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:49:12,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73b32aa2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:49:12,511 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e2ddae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:49:12,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:49:12,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e3a0561{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:49:12,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@751489f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88a6c016-3a90-a21d-0154-cd217ea488fb/hadoop.log.dir/,STOPPED} 2024-12-07T12:49:12,523 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:49:12,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
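
Note on the trace above: the "Failed to close file" error is raised on the teardown path TestAsyncWALReplay.tearDownAfterClass -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniDFSCluster -> MiniDFSCluster.shutdown -> DFSClient.closeAllFilesBeingWritten, i.e. the DFS client still held a writer for the WAL when the mini cluster was torn down, but the NameNode had already released that file's lease (hence "does not have any open files"). Below is a minimal sketch, not taken from the log, of the JUnit 4 lifecycle that produces this call chain; it assumes HBaseTestingUtil has a no-arg constructor and a startMiniCluster() method as the legacy HBaseTestingUtility did, while only shutdownMiniCluster and shutdownMiniDFSCluster are confirmed by the trace itself.

// Sketch (assumed API, see note above): JUnit 4 lifecycle matching the teardown path in the trace.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {

  // Assumed no-arg constructor, as in the legacy HBaseTestingUtility.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Assumed method: starts ZooKeeper (MiniZooKeeperCluster), HDFS (MiniDFSCluster) and HBase.
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Confirmed by the trace: shutdownMiniCluster() eventually reaches
    // shutdownMiniDFSCluster() -> MiniDFSCluster.shutdown() -> DFSClient.closeAllFilesBeingWritten().
    // Any output stream still open at that point is force-closed, and if the
    // NameNode no longer holds the file's lease for this client, complete()
    // fails with the "does not have any open files" RemoteException logged above.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void smoke() {
    // Placeholder; the real tests replay WAL files against the mini cluster.
  }
}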