2024-12-02 14:17:03,788 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-02 14:17:03,803 main DEBUG Took 0.012247 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 14:17:03,803 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 14:17:03,803 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 14:17:03,805 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 14:17:03,806 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,815 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 14:17:03,830 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,832 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,833 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,835 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,836 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,837 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,838 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,838 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 14:17:03,839 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,839 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,840 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,841 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,841 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,842 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,842 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,842 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,843 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 14:17:03,843 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,844 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 14:17:03,846 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 14:17:03,847 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 14:17:03,849 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 14:17:03,850 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-02 14:17:03,851 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 14:17:03,852 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 14:17:03,864 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 14:17:03,868 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 14:17:03,870 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 14:17:03,870 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 14:17:03,871 main DEBUG createAppenders(={Console}) 2024-12-02 14:17:03,872 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized 2024-12-02 14:17:03,872 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-02 14:17:03,872 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK. 2024-12-02 14:17:03,873 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 14:17:03,873 main DEBUG OutputStream closed 2024-12-02 14:17:03,874 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 14:17:03,874 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 14:17:03,875 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK 2024-12-02 14:17:03,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 14:17:03,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 14:17:03,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 14:17:03,966 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 14:17:03,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 14:17:03,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 14:17:03,968 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 14:17:03,968 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 14:17:03,969 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 14:17:03,969 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 14:17:03,969 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 14:17:03,970 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 14:17:03,971 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 14:17:03,971 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 14:17:03,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 14:17:03,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 14:17:03,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 14:17:03,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 14:17:03,977 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 14:17:03,977 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null 2024-12-02 14:17:03,979 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 14:17:03,981 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK. 2024-12-02T14:17:04,300 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375 2024-12-02 14:17:04,304 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 14:17:04,304 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-02T14:17:04,314 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-12-02T14:17:04,321 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-12-02T14:17:04,350 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T14:17:04,410 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T14:17:04,410 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T14:17:04,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:17:04,446 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a, deleteOnExit=true 2024-12-02T14:17:04,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:17:04,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/test.cache.data in system properties and HBase conf 2024-12-02T14:17:04,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:17:04,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:17:04,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:17:04,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:17:04,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:17:04,559 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-02T14:17:04,694 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T14:17:04,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:17:04,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:17:04,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:17:04,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:17:04,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:17:04,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:17:04,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:17:04,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:17:04,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:17:04,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:17:04,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:17:04,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:17:04,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:17:04,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:17:05,617 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T14:17:05,692 INFO [Time-limited test {}] log.Log(170): Logging initialized @2689ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T14:17:05,772 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:17:05,855 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:17:05,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:17:05,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:17:05,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:17:05,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:17:05,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:17:05,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:17:06,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/java.io.tmpdir/jetty-localhost-43899-hadoop-hdfs-3_4_1-tests_jar-_-any-18006623875059422335/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:17:06,163 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:43899} 2024-12-02T14:17:06,163 INFO [Time-limited test {}] server.Server(415): Started @3160ms 2024-12-02T14:17:06,618 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:17:06,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:17:06,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:17:06,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:17:06,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:17:06,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:17:06,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:17:06,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/java.io.tmpdir/jetty-localhost-46415-hadoop-hdfs-3_4_1-tests_jar-_-any-10822054422876522281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:17:06,799 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:46415} 2024-12-02T14:17:06,799 INFO [Time-limited test {}] server.Server(415): Started @3796ms 2024-12-02T14:17:06,857 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:17:07,016 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:17:07,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:17:07,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:17:07,055 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:17:07,055 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:17:07,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:17:07,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:17:07,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/java.io.tmpdir/jetty-localhost-38657-hadoop-hdfs-3_4_1-tests_jar-_-any-6378299999768058267/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:17:07,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:38657} 2024-12-02T14:17:07,210 INFO [Time-limited test {}] server.Server(415): Started @4207ms 2024-12-02T14:17:07,212 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:17:07,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:17:07,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:17:07,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:17:07,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:17:07,322 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:17:07,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:17:07,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:17:07,363 WARN [Thread-103 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data1/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,364 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data2/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,367 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data3/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,369 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data4/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,424 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:17:07,427 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:17:07,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/java.io.tmpdir/jetty-localhost-42447-hadoop-hdfs-3_4_1-tests_jar-_-any-15301266002484539496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:17:07,484 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:42447} 2024-12-02T14:17:07,484 INFO [Time-limited test {}] server.Server(415): Started @4481ms 2024-12-02T14:17:07,487 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:17:07,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad37b4c98f3efa82 with lease ID 0x4a6ebeed305b7329: Processing first storage report for DS-077d37d8-abd7-40e8-87b1-2528b05f4c15 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=6dedecca-12aa-42e5-808f-23a2471ac0d0, infoPort=43117, infoSecurePort=0, ipcPort=36635, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad37b4c98f3efa82 with lease ID 0x4a6ebeed305b7329: from storage DS-077d37d8-abd7-40e8-87b1-2528b05f4c15 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=6dedecca-12aa-42e5-808f-23a2471ac0d0, infoPort=43117, infoSecurePort=0, ipcPort=36635, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf86116de5d0cc942 with lease ID 0x4a6ebeed305b7328: Processing first storage report for DS-1184478a-879d-4379-ada9-9b95037f8ac0 from datanode DatanodeRegistration(127.0.0.1:44963, datanodeUuid=68ba86ae-a0c1-47d2-a0fe-7c5ec0e16877, infoPort=39241, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf86116de5d0cc942 with lease ID 0x4a6ebeed305b7328: from storage DS-1184478a-879d-4379-ada9-9b95037f8ac0 node DatanodeRegistration(127.0.0.1:44963, datanodeUuid=68ba86ae-a0c1-47d2-a0fe-7c5ec0e16877, infoPort=39241, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad37b4c98f3efa82 with lease ID 0x4a6ebeed305b7329: Processing first storage report for DS-e28d288a-b772-4e5c-ad41-7cc2b8dcb9c2 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=6dedecca-12aa-42e5-808f-23a2471ac0d0, infoPort=43117, infoSecurePort=0, ipcPort=36635, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad37b4c98f3efa82 with lease 
ID 0x4a6ebeed305b7329: from storage DS-e28d288a-b772-4e5c-ad41-7cc2b8dcb9c2 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=6dedecca-12aa-42e5-808f-23a2471ac0d0, infoPort=43117, infoSecurePort=0, ipcPort=36635, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf86116de5d0cc942 with lease ID 0x4a6ebeed305b7328: Processing first storage report for DS-2b538aff-df8e-4b50-9bd9-679dce42705a from datanode DatanodeRegistration(127.0.0.1:44963, datanodeUuid=68ba86ae-a0c1-47d2-a0fe-7c5ec0e16877, infoPort=39241, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf86116de5d0cc942 with lease ID 0x4a6ebeed305b7328: from storage DS-2b538aff-df8e-4b50-9bd9-679dce42705a node DatanodeRegistration(127.0.0.1:44963, datanodeUuid=68ba86ae-a0c1-47d2-a0fe-7c5ec0e16877, infoPort=39241, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,671 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data5/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,672 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data6/current/BP-1615503913-172.17.0.3-1733149025365/current, will proceed with Du for space computation calculation, 2024-12-02T14:17:07,695 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:17:07,701 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x714de0c11d42048b with lease ID 0x4a6ebeed305b732a: Processing first storage report for DS-9971975c-9085-4344-bd27-b0115609ff6d from datanode DatanodeRegistration(127.0.0.1:39417, datanodeUuid=e58674ab-0b59-436b-aa48-22e8c7f97388, infoPort=36239, infoSecurePort=0, ipcPort=43585, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,701 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x714de0c11d42048b with lease ID 0x4a6ebeed305b732a: from storage DS-9971975c-9085-4344-bd27-b0115609ff6d node DatanodeRegistration(127.0.0.1:39417, datanodeUuid=e58674ab-0b59-436b-aa48-22e8c7f97388, infoPort=36239, infoSecurePort=0, ipcPort=43585, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,701 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x714de0c11d42048b with lease ID 0x4a6ebeed305b732a: Processing first storage report for DS-59fb4e81-6ed3-4016-914a-cae965888f09 from datanode DatanodeRegistration(127.0.0.1:39417, datanodeUuid=e58674ab-0b59-436b-aa48-22e8c7f97388, infoPort=36239, infoSecurePort=0, ipcPort=43585, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365) 2024-12-02T14:17:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x714de0c11d42048b with lease ID 0x4a6ebeed305b732a: from storage DS-59fb4e81-6ed3-4016-914a-cae965888f09 node DatanodeRegistration(127.0.0.1:39417, datanodeUuid=e58674ab-0b59-436b-aa48-22e8c7f97388, infoPort=36239, infoSecurePort=0, ipcPort=43585, storageInfo=lv=-57;cid=testClusterID;nsid=1956261866;c=1733149025365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:17:07,914 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375 2024-12-02T14:17:08,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/zookeeper_0, clientPort=56104, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:17:08,023 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56104 2024-12-02T14:17:08,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:08,036 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:08,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:17:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:17:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:17:08,696 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 with version=8 2024-12-02T14:17:08,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/hbase-staging 2024-12-02T14:17:08,996 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b4ac66777750:0 server-side Connection retries=45 2024-12-02T14:17:09,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,012 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:17:09,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:17:09,179 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:17:09,240 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T14:17:09,248 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T14:17:09,252 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:17:09,281 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22976 (auto-detected) 2024-12-02T14:17:09,282 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-02T14:17:09,301 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42687 2024-12-02T14:17:09,321 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=master:42687 connecting to ZooKeeper ensemble=127.0.0.1:56104 2024-12-02T14:17:09,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:426870x0, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:17:09,353 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42687-0x1009b59793c0000 connected 2024-12-02T14:17:09,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:17:09,393 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016, hbase.cluster.distributed=false 2024-12-02T14:17:09,415 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:17:09,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42687 2024-12-02T14:17:09,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42687 2024-12-02T14:17:09,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42687 2024-12-02T14:17:09,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42687 2024-12-02T14:17:09,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42687 2024-12-02T14:17:09,535 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b4ac66777750:0 server-side Connection retries=45 2024-12-02T14:17:09,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,537 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:17:09,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 
2024-12-02T14:17:09,540 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:17:09,543 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:17:09,544 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40955 2024-12-02T14:17:09,545 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40955 connecting to ZooKeeper ensemble=127.0.0.1:56104 2024-12-02T14:17:09,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:409550x0, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:17:09,561 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40955-0x1009b59793c0001 connected 2024-12-02T14:17:09,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:17:09,565 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:17:09,573 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:17:09,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:17:09,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:17:09,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40955 2024-12-02T14:17:09,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40955 2024-12-02T14:17:09,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40955 2024-12-02T14:17:09,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40955 2024-12-02T14:17:09,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40955 2024-12-02T14:17:09,599 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b4ac66777750:0 server-side Connection retries=45 2024-12-02T14:17:09,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,600 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:17:09,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:17:09,600 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:17:09,601 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:17:09,601 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41225 2024-12-02T14:17:09,603 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41225 connecting to ZooKeeper ensemble=127.0.0.1:56104 2024-12-02T14:17:09,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412250x0, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:17:09,613 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41225-0x1009b59793c0002 connected 2024-12-02T14:17:09,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:17:09,613 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:17:09,616 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:17:09,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:17:09,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:17:09,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41225 
2024-12-02T14:17:09,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41225 2024-12-02T14:17:09,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41225 2024-12-02T14:17:09,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41225 2024-12-02T14:17:09,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41225 2024-12-02T14:17:09,646 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b4ac66777750:0 server-side Connection retries=45 2024-12-02T14:17:09,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,646 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:17:09,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:17:09,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:17:09,647 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:17:09,647 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:17:09,648 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43009 2024-12-02T14:17:09,649 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43009 connecting to ZooKeeper ensemble=127.0.0.1:56104 2024-12-02T14:17:09,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430090x0, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:17:09,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:430090x0, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:17:09,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:43009-0x1009b59793c0003 connected 2024-12-02T14:17:09,659 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:17:09,660 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:17:09,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:17:09,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:17:09,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43009 2024-12-02T14:17:09,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43009 2024-12-02T14:17:09,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43009 2024-12-02T14:17:09,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43009 2024-12-02T14:17:09,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43009 2024-12-02T14:17:09,684 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b4ac66777750:42687 2024-12-02T14:17:09,685 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b4ac66777750,42687,1733149028802 2024-12-02T14:17:09,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,694 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b4ac66777750,42687,1733149028802 2024-12-02T14:17:09,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:17:09,712 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:17:09,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:17:09,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,714 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:17:09,715 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b4ac66777750,42687,1733149028802 from backup master directory 2024-12-02T14:17:09,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b4ac66777750,42687,1733149028802 2024-12-02T14:17:09,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:17:09,720 WARN [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T14:17:09,720 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b4ac66777750,42687,1733149028802 2024-12-02T14:17:09,723 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T14:17:09,724 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T14:17:09,785 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/hbase.id] with ID: 2a698aee-e82c-4743-9019-188c37b7d070 2024-12-02T14:17:09,785 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/.tmp/hbase.id 2024-12-02T14:17:09,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:17:09,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:17:09,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:17:09,801 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/.tmp/hbase.id]:[hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/hbase.id] 2024-12-02T14:17:09,841 INFO [master/b4ac66777750:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:17:09,846 INFO [master/b4ac66777750:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:17:09,865 INFO [master/b4ac66777750:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
2024-12-02T14:17:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:09,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:17:09,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:17:09,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:17:09,902 INFO [master/b4ac66777750:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:17:09,904 INFO [master/b4ac66777750:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:17:09,909 INFO [master/b4ac66777750:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:09,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 
is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:17:09,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:17:09,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:17:10,370 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store 2024-12-02T14:17:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:17:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:17:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:17:10,397 INFO [master/b4ac66777750:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-02T14:17:10,401 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:10,403 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:17:10,403 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:10,403 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:10,405 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:17:10,405 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:10,406 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:10,407 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733149030403Disabling compacts and flushes for region at 1733149030403Disabling writes for close at 1733149030405 (+2 ms)Writing region close event to WAL at 1733149030405Closed at 1733149030406 (+1 ms) 2024-12-02T14:17:10,409 WARN [master/b4ac66777750:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/.initializing 2024-12-02T14:17:10,409 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/WALs/b4ac66777750,42687,1733149028802 2024-12-02T14:17:10,417 INFO [master/b4ac66777750:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:10,435 INFO [master/b4ac66777750:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b4ac66777750%2C42687%2C1733149028802, suffix=, logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/WALs/b4ac66777750,42687,1733149028802, archiveDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/oldWALs, maxLogs=10 2024-12-02T14:17:10,468 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/WALs/b4ac66777750,42687,1733149028802/b4ac66777750%2C42687%2C1733149028802.1733149030440, exclude list is [], retry=0 2024-12-02T14:17:10,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:10,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:10,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:10,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:10,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-02T14:17:10,534 INFO [master/b4ac66777750:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/WALs/b4ac66777750,42687,1733149028802/b4ac66777750%2C42687%2C1733149028802.1733149030440 2024-12-02T14:17:10,535 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:10,536 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:10,536 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:10,539 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,540 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:17:10,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:10,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:10,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:17:10,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:10,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:10,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:17:10,620 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:10,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:10,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:17:10,623 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:10,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:10,625 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,628 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,629 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,633 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,634 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,637 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:17:10,641 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:17:10,645 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:10,646 INFO [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68428242, jitterRate=0.019660264253616333}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:17:10,652 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733149030552Initializing all the Stores at 1733149030554 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149030555 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149030555Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149030556 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149030556Cleaning up temporary data from old regions at 1733149030634 (+78 ms)Region opened successfully at 1733149030652 (+18 ms) 2024-12-02T14:17:10,653 INFO [master/b4ac66777750:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:17:10,686 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ace90a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b4ac66777750/172.17.0.3:0 2024-12-02T14:17:10,718 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:17:10,729 INFO [master/b4ac66777750:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:17:10,729 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:17:10,732 INFO [master/b4ac66777750:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:17:10,733 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T14:17:10,738 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-02T14:17:10,738 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:17:10,764 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:17:10,772 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:17:10,774 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:17:10,777 INFO [master/b4ac66777750:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:17:10,779 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:17:10,781 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:17:10,783 INFO [master/b4ac66777750:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:17:10,786 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:17:10,787 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:17:10,788 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:17:10,790 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:17:10,807 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:17:10,809 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,816 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b4ac66777750,42687,1733149028802, sessionid=0x1009b59793c0000, setting cluster-up flag (Was=false) 2024-12-02T14:17:10,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,834 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:17:10,836 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b4ac66777750,42687,1733149028802 2024-12-02T14:17:10,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:10,847 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:17:10,848 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b4ac66777750,42687,1733149028802 2024-12-02T14:17:10,854 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:17:10,874 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(746): ClusterId : 2a698aee-e82c-4743-9019-188c37b7d070 2024-12-02T14:17:10,874 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(746): ClusterId : 2a698aee-e82c-4743-9019-188c37b7d070 2024-12-02T14:17:10,874 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(746): ClusterId : 2a698aee-e82c-4743-9019-188c37b7d070 2024-12-02T14:17:10,877 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:17:10,877 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:17:10,877 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:17:10,883 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-02T14:17:10,883 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:17:10,883 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:17:10,883 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:17:10,883 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:17:10,883 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:17:10,887 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:17:10,887 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:17:10,887 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:17:10,888 DEBUG [RS:2;b4ac66777750:43009 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58eedee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b4ac66777750/172.17.0.3:0 2024-12-02T14:17:10,888 DEBUG [RS:0;b4ac66777750:40955 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b808057, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b4ac66777750/172.17.0.3:0 2024-12-02T14:17:10,888 DEBUG [RS:1;b4ac66777750:41225 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9493e23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b4ac66777750/172.17.0.3:0 2024-12-02T14:17:10,907 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b4ac66777750:40955 2024-12-02T14:17:10,907 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b4ac66777750:41225 2024-12-02T14:17:10,910 INFO [RS:0;b4ac66777750:40955 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:17:10,910 INFO [RS:1;b4ac66777750:41225 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:17:10,911 INFO [RS:1;b4ac66777750:41225 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:17:10,911 INFO [RS:0;b4ac66777750:40955 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:17:10,911 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T14:17:10,911 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T14:17:10,914 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=40955, startcode=1733149029496 2024-12-02T14:17:10,914 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=41225, startcode=1733149029599 2024-12-02T14:17:10,914 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b4ac66777750:43009 2024-12-02T14:17:10,914 INFO [RS:2;b4ac66777750:43009 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:17:10,914 INFO [RS:2;b4ac66777750:43009 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:17:10,914 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T14:17:10,916 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=43009, startcode=1733149029645 2024-12-02T14:17:10,926 DEBUG [RS:1;b4ac66777750:41225 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:17:10,926 DEBUG [RS:0;b4ac66777750:40955 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:17:10,926 DEBUG [RS:2;b4ac66777750:43009 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:17:10,950 INFO [AsyncFSWAL-0-hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData-prefix:b4ac66777750,42687,1733149028802 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-12-02T14:17:10,965 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55689, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:17:10,965 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58995, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:17:10,965 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59121, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:17:10,967 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:17:10,973 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T14:17:10,979 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T14:17:10,979 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T14:17:10,980 INFO [master/b4ac66777750:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:17:10,989 INFO [master/b4ac66777750:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:17:10,996 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b4ac66777750,42687,1733149028802 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:17:11,004 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b4ac66777750:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:17:11,005 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b4ac66777750:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:17:11,005 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b4ac66777750:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:17:11,005 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b4ac66777750:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:17:11,005 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b4ac66777750:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:17:11,005 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,006 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b4ac66777750:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:17:11,006 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 
2024-12-02T14:17:11,008 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T14:17:11,008 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T14:17:11,008 WARN [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T14:17:11,008 WARN [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T14:17:11,008 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T14:17:11,008 WARN [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T14:17:11,009 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733149061009 2024-12-02T14:17:11,010 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:17:11,011 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:17:11,012 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:17:11,013 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:17:11,014 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:17:11,015 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:17:11,015 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:17:11,015 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:17:11,016 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:11,018 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:17:11,020 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,020 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:17:11,020 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:17:11,020 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:17:11,022 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:17:11,023 INFO [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:17:11,025 WARN [IPC Server handler 3 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:11,025 WARN [IPC Server handler 3 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:11,025 WARN [IPC Server handler 3 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:11,026 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.large.0-1733149031024,5,FailOnTimeoutGroup] 2024-12-02T14:17:11,026 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.small.0-1733149031026,5,FailOnTimeoutGroup] 2024-12-02T14:17:11,027 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,027 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:17:11,028 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,028 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
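The HMaster(1741) line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of setting that property is shown below; the threshold value of 3 is an arbitrary illustration, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the master chore referenced in the log line above.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }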
2024-12-02T14:17:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:17:11,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:17:11,037 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:17:11,038 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 2024-12-02T14:17:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:17:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:17:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:17:11,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:11,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:17:11,096 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:17:11,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:17:11,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:17:11,101 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:17:11,105 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:17:11,105 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:17:11,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:17:11,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,110 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=40955, startcode=1733149029496 2024-12-02T14:17:11,110 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=41225, startcode=1733149029599 2024-12-02T14:17:11,110 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(2659): reportForDuty to master=b4ac66777750,42687,1733149028802 with port=43009, startcode=1733149029645 2024-12-02T14:17:11,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:17:11,112 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740 2024-12-02T14:17:11,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740 2024-12-02T14:17:11,115 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(517): Registering regionserver=b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:17:11,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:17:11,118 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:17:11,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:17:11,125 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:11,126 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60288332, jitterRate=-0.10163384675979614}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:17:11,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(517): Registering regionserver=b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,127 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 2024-12-02T14:17:11,127 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42525 2024-12-02T14:17:11,127 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:17:11,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733149031092Initializing all the Stores at 1733149031093 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031093Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031093Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149031093Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031093Cleaning up temporary data from old regions at 1733149031117 (+24 ms)Region opened successfully at 1733149031131 (+14 ms) 2024-12-02T14:17:11,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:17:11,131 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:17:11,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:17:11,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:17:11,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:17:11,133 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:17:11,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733149031131Disabling compacts and flushes for region at 1733149031131Disabling writes for close at 1733149031131Writing region close event to WAL at 1733149031132 (+1 ms)Closed at 1733149031133 (+1 ms) 2024-12-02T14:17:11,133 DEBUG [RS:0;b4ac66777750:40955 {}] zookeeper.ZKUtil(111): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,133 WARN [RS:0;b4ac66777750:40955 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T14:17:11,133 INFO [RS:0;b4ac66777750:40955 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:11,133 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:17:11,136 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:17:11,136 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:17:11,136 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 2024-12-02T14:17:11,137 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42525 2024-12-02T14:17:11,137 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:17:11,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42687 {}] master.ServerManager(517): Registering regionserver=b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:17:11,143 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 2024-12-02T14:17:11,143 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42525 2024-12-02T14:17:11,143 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:17:11,143 DEBUG [RS:2;b4ac66777750:43009 {}] zookeeper.ZKUtil(111): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,143 WARN [RS:2;b4ac66777750:43009 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T14:17:11,144 INFO [RS:2;b4ac66777750:43009 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:11,144 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:17:11,153 DEBUG [RS:1;b4ac66777750:41225 {}] zookeeper.ZKUtil(111): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,153 WARN [RS:1;b4ac66777750:41225 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:17:11,153 INFO [RS:1;b4ac66777750:41225 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:11,153 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b4ac66777750,40955,1733149029496] 2024-12-02T14:17:11,156 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b4ac66777750,41225,1733149029599] 2024-12-02T14:17:11,156 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b4ac66777750,43009,1733149029645] 2024-12-02T14:17:11,156 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:17:11,160 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:17:11,170 INFO [RS:2;b4ac66777750:43009 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:17:11,170 INFO [RS:1;b4ac66777750:41225 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:17:11,170 INFO [RS:0;b4ac66777750:40955 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:17:11,188 INFO [RS:1;b4ac66777750:41225 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:17:11,188 INFO [RS:2;b4ac66777750:43009 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:17:11,188 INFO [RS:0;b4ac66777750:40955 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, 
Offheap=false 2024-12-02T14:17:11,194 INFO [RS:1;b4ac66777750:41225 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:17:11,194 INFO [RS:0;b4ac66777750:40955 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:17:11,194 INFO [RS:2;b4ac66777750:43009 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:17:11,194 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,194 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,194 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,195 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:17:11,198 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:17:11,198 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:17:11,201 INFO [RS:1;b4ac66777750:41225 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:17:11,201 INFO [RS:0;b4ac66777750:40955 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:17:11,203 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,203 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,203 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:17:11,204 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 INFO [RS:2;b4ac66777750:43009 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:11,204 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,204 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,205 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,205 DEBUG [RS:1;b4ac66777750:41225 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:0;b4ac66777750:40955 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service 
name=RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,205 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,206 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,206 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,206 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b4ac66777750:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:17:11,206 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,206 DEBUG [RS:2;b4ac66777750:43009 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b4ac66777750:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,218 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,218 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,40955,1733149029496-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:17:11,218 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,43009,1733149029645-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:17:11,217 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,41225,1733149029599-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:17:11,243 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:17:11,245 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:17:11,245 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,43009,1733149029645-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,245 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,40955,1733149029496-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,245 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,245 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,245 INFO [RS:0;b4ac66777750:40955 {}] regionserver.Replication(171): b4ac66777750,40955,1733149029496 started 2024-12-02T14:17:11,245 INFO [RS:2;b4ac66777750:43009 {}] regionserver.Replication(171): b4ac66777750,43009,1733149029645 started 2024-12-02T14:17:11,251 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:17:11,251 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,41225,1733149029599-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,251 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:11,252 INFO [RS:1;b4ac66777750:41225 {}] regionserver.Replication(171): b4ac66777750,41225,1733149029599 started 2024-12-02T14:17:11,271 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,271 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,272 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1482): Serving as b4ac66777750,40955,1733149029496, RpcServer on b4ac66777750/172.17.0.3:40955, sessionid=0x1009b59793c0001 2024-12-02T14:17:11,272 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1482): Serving as b4ac66777750,43009,1733149029645, RpcServer on b4ac66777750/172.17.0.3:43009, sessionid=0x1009b59793c0003 2024-12-02T14:17:11,273 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:17:11,273 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:17:11,273 DEBUG [RS:0;b4ac66777750:40955 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,273 DEBUG [RS:2;b4ac66777750:43009 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,273 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b4ac66777750,40955,1733149029496' 2024-12-02T14:17:11,273 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b4ac66777750,43009,1733149029645' 2024-12-02T14:17:11,273 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:17:11,273 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:17:11,275 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:17:11,275 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:17:11,276 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:17:11,276 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:17:11,276 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:17:11,276 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:17:11,276 DEBUG [RS:0;b4ac66777750:40955 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b4ac66777750,40955,1733149029496 2024-12-02T14:17:11,276 DEBUG [RS:2;b4ac66777750:43009 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,276 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(357): 
Starting procedure member 'b4ac66777750,43009,1733149029645' 2024-12-02T14:17:11,276 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b4ac66777750,40955,1733149029496' 2024-12-02T14:17:11,276 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:17:11,276 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:17:11,276 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:11,276 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1482): Serving as b4ac66777750,41225,1733149029599, RpcServer on b4ac66777750/172.17.0.3:41225, sessionid=0x1009b59793c0002 2024-12-02T14:17:11,277 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:17:11,277 DEBUG [RS:1;b4ac66777750:41225 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,277 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:17:11,277 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b4ac66777750,41225,1733149029599' 2024-12-02T14:17:11,277 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:17:11,277 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:17:11,277 DEBUG [RS:2;b4ac66777750:43009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:17:11,278 INFO [RS:2;b4ac66777750:43009 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:17:11,278 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:17:11,278 INFO [RS:2;b4ac66777750:43009 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:17:11,278 DEBUG [RS:0;b4ac66777750:40955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:17:11,278 INFO [RS:0;b4ac66777750:40955 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:17:11,278 INFO [RS:0;b4ac66777750:40955 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
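The RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager lines above show that quota support is disabled in this test cluster. A minimal sketch of switching it on follows, assuming the hbase.quota.enabled key; that key does not appear in this log and is an assumption here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotasSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // With this flag set, region servers start the RPC and space quota managers
        // instead of logging "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }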
2024-12-02T14:17:11,278 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:17:11,278 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:17:11,279 DEBUG [RS:1;b4ac66777750:41225 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b4ac66777750,41225,1733149029599 2024-12-02T14:17:11,279 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b4ac66777750,41225,1733149029599' 2024-12-02T14:17:11,279 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:17:11,279 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:17:11,280 DEBUG [RS:1;b4ac66777750:41225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:17:11,280 INFO [RS:1;b4ac66777750:41225 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:17:11,280 INFO [RS:1;b4ac66777750:41225 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:17:11,311 WARN [b4ac66777750:42687 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:17:11,383 INFO [RS:2;b4ac66777750:43009 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:11,383 INFO [RS:0;b4ac66777750:40955 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:11,383 INFO [RS:1;b4ac66777750:41225 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:11,387 INFO [RS:0;b4ac66777750:40955 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b4ac66777750%2C40955%2C1733149029496, suffix=, logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,40955,1733149029496, archiveDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs, maxLogs=32 2024-12-02T14:17:11,387 INFO [RS:1;b4ac66777750:41225 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b4ac66777750%2C41225%2C1733149029599, suffix=, logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599, archiveDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs, maxLogs=32 2024-12-02T14:17:11,387 INFO [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b4ac66777750%2C43009%2C1733149029645, suffix=, logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645, archiveDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs, maxLogs=32 2024-12-02T14:17:11,409 DEBUG [RS:2;b4ac66777750:43009 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645/b4ac66777750%2C43009%2C1733149029645.1733149031391, exclude list is [], retry=0 2024-12-02T14:17:11,414 DEBUG 
[RS:1;b4ac66777750:41225 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599/b4ac66777750%2C41225%2C1733149029599.1733149031392, exclude list is [], retry=0 2024-12-02T14:17:11,414 DEBUG [RS:0;b4ac66777750:40955 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,40955,1733149029496/b4ac66777750%2C40955%2C1733149029496.1733149031392, exclude list is [], retry=0 2024-12-02T14:17:11,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:11,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:11,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:11,418 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:11,418 WARN [IPC Server handler 0 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:11,418 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:11,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:11,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:11,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:11,422 INFO [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645/b4ac66777750%2C43009%2C1733149029645.1733149031391 2024-12-02T14:17:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:11,424 DEBUG [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:11,425 INFO [RS:1;b4ac66777750:41225 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599/b4ac66777750%2C41225%2C1733149029599.1733149031392 2024-12-02T14:17:11,429 DEBUG [RS:1;b4ac66777750:41225 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:11,432 INFO [RS:0;b4ac66777750:40955 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,40955,1733149029496/b4ac66777750%2C40955%2C1733149029496.1733149031392 2024-12-02T14:17:11,435 DEBUG [RS:0;b4ac66777750:40955 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:11,563 DEBUG [b4ac66777750:42687 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T14:17:11,571 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(204): Hosts are {b4ac66777750=0} racks are {/default-rack=0} 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T14:17:11,577 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T14:17:11,577 INFO [b4ac66777750:42687 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 
2024-12-02T14:17:11,577 INFO [b4ac66777750:42687 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T14:17:11,577 INFO [b4ac66777750:42687 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T14:17:11,578 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T14:17:11,586 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,592 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b4ac66777750,43009,1733149029645, state=OPENING 2024-12-02T14:17:11,597 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:17:11,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:11,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:11,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:11,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:11,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,602 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:17:11,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b4ac66777750,43009,1733149029645}] 2024-12-02T14:17:11,779 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:17:11,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45075, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:17:11,792 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 
{event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:17:11,793 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:11,793 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T14:17:11,796 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b4ac66777750%2C43009%2C1733149029645.meta, suffix=.meta, logDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645, archiveDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs, maxLogs=32 2024-12-02T14:17:11,812 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645/b4ac66777750%2C43009%2C1733149029645.meta.1733149031798.meta, exclude list is [], retry=0 2024-12-02T14:17:11,815 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:11,815 WARN [IPC Server handler 2 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:11,815 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:11,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:11,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:11,819 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,43009,1733149029645/b4ac66777750%2C43009%2C1733149029645.meta.1733149031798.meta 2024-12-02T14:17:11,820 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:11,820 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:11,822 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:17:11,824 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:17:11,829 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T14:17:11,833 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:17:11,833 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:11,834 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:17:11,834 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:17:11,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:17:11,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:17:11,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:17:11,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:17:11,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:17:11,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:17:11,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:17:11,844 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:17:11,844 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:11,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:17:11,845 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:17:11,846 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740 2024-12-02T14:17:11,848 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740 2024-12-02T14:17:11,851 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:17:11,851 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:17:11,852 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-02T14:17:11,854 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:17:11,856 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73671740, jitterRate=0.09779447317123413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:17:11,856 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:17:11,857 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733149031834Writing region info on filesystem at 1733149031834Initializing all the Stores at 1733149031836 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031836Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031836Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149031836Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733149031836Cleaning up temporary data from old regions at 1733149031851 (+15 ms)Running coprocessor post-open hooks at 1733149031856 (+5 ms)Region opened successfully at 1733149031857 (+1 ms) 2024-12-02T14:17:11,864 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733149031771 2024-12-02T14:17:11,903 DEBUG [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:17:11,904 INFO [RS_OPEN_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:17:11,906 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,909 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b4ac66777750,43009,1733149029645, state=OPEN 2024-12-02T14:17:11,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:17:11,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:17:11,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:17:11,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:17:11,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:17:11,914 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b4ac66777750,43009,1733149029645 2024-12-02T14:17:11,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:17:11,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b4ac66777750,43009,1733149029645 in 310 msec 2024-12-02T14:17:11,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:17:11,933 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 779 msec 2024-12-02T14:17:11,934 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:17:11,935 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:17:11,957 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:17:11,959 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=b4ac66777750,43009,1733149029645, seqNum=-1] 2024-12-02T14:17:11,986 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:17:11,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55579, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:17:12,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1260 sec 2024-12-02T14:17:12,010 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733149032010, completionTime=-1 2024-12-02T14:17:12,012 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T14:17:12,012 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:17:12,037 INFO [master/b4ac66777750:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T14:17:12,037 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733149092037 2024-12-02T14:17:12,037 INFO [master/b4ac66777750:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733149152037 2024-12-02T14:17:12,037 INFO [master/b4ac66777750:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-02T14:17:12,039 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-02T14:17:12,045 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:12,046 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:12,046 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:12,048 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b4ac66777750:42687, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:12,048 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:12,049 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-02T14:17:12,055 DEBUG [master/b4ac66777750:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:17:12,079 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.359sec 2024-12-02T14:17:12,080 INFO [master/b4ac66777750:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:17:12,082 INFO [master/b4ac66777750:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:17:12,082 INFO [master/b4ac66777750:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:17:12,083 INFO [master/b4ac66777750:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:17:12,083 INFO [master/b4ac66777750:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:17:12,084 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:17:12,084 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:17:12,088 DEBUG [master/b4ac66777750:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:17:12,089 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:17:12,089 INFO [master/b4ac66777750:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b4ac66777750,42687,1733149028802-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
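The startup entries above, and the WAL-split entries that follow, all share one Log4j pattern: an ISO-style timestamp with comma-separated milliseconds, a level, the thread name with an MDC placeholder in braces, the logging class with a source line number, and the message, with consecutive entries often running together on a single physical line. Below is a minimal Python sketch for re-splitting such a dump into structured records; the regex, field names, and the abridged sample entries are illustrative assumptions for this log's visible format, not part of HBase or Log4j.

```python
import re
from datetime import datetime

# Each entry starts with "<timestamp> <LEVEL> [<thread> {mdc}] <class>(<line>): <message>",
# as seen throughout this dump.
ENTRY_RE = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>TRACE|DEBUG|INFO|WARN|ERROR|FATAL)\s+"
    r"\[(?P<thread>[^\]]*)\]\s+"
    r"(?P<source>[\w.$]+)\((?P<line>-?\d+)\):\s*"
)

def split_entries(dump: str):
    """Yield (timestamp, level, thread, source, message) for each entry in a wrapped dump."""
    starts = [m.start() for m in ENTRY_RE.finditer(dump)]
    for begin, end in zip(starts, starts[1:] + [len(dump)]):
        m = ENTRY_RE.match(dump, begin)
        yield (
            datetime.strptime(m.group("ts"), "%Y-%m-%dT%H:%M:%S,%f"),
            m.group("level"),
            m.group("thread").replace("{}", "").strip(),  # drop the empty MDC braces
            m.group("source"),
            dump[m.end():end].strip(),  # message runs until the next entry's timestamp
        )

# Abridged sample built from two of the WALSplitter entries later in this log.
sample = (
    "2024-12-02T14:17:12,362 INFO [Time-limited test {}] wal.WALSplitter(299): "
    "Splitting wal-1, size=320 (320bytes) "
    "2024-12-02T14:17:12,407 INFO [Time-limited test {}] wal.WALSplitter(425): "
    "Processed 2 edits across 1 Regions in 28 ms"
)
for ts, level, thread, source, msg in split_entries(sample):
    print(f"{ts} {level:5} {source}: {msg}")
```

With entries split out this way, timing details recorded below (the "(+N ms)" deltas in the region open/close journals, or the "Processed 2 edits across 1 Regions in 28 ms" split summary) can be extracted from the messages for comparison across runs.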
2024-12-02T14:17:12,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73581c7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:17:12,110 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b4ac66777750,42687,-1 for getting cluster id 2024-12-02T14:17:12,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:17:12,123 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2a698aee-e82c-4743-9019-188c37b7d070' 2024-12-02T14:17:12,125 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:17:12,125 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2a698aee-e82c-4743-9019-188c37b7d070" 2024-12-02T14:17:12,126 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ebe668f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:17:12,126 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b4ac66777750,42687,-1] 2024-12-02T14:17:12,128 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:17:12,130 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:17:12,132 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:17:12,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3334605f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:17:12,135 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:17:12,143 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b4ac66777750,43009,1733149029645, seqNum=-1] 2024-12-02T14:17:12,144 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:17:12,146 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:17:12,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b4ac66777750,42687,1733149028802 2024-12-02T14:17:12,173 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:42525/hbase 2024-12-02T14:17:12,192 INFO [Time-limited 
test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=356, OpenFileDescriptor=595, MaxFileDescriptor=1048576, SystemLoadAverage=313, ProcessCount=11, AvailableMemoryMB=4860 2024-12-02T14:17:12,218 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:12,222 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:12,223 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:12,228 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-72104879, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-72104879, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:12,243 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-72104879/hregion-72104879.1733149032230, exclude list is [], retry=0 2024-12-02T14:17:12,247 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:12,248 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:12,249 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:12,256 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-72104879/hregion-72104879.1733149032230 2024-12-02T14:17:12,256 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:12,256 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 89fe4eabfa335dc8b7ff2a913620b9b9, NAME => 'testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:12,265 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,265 WARN [IPC Server handler 0 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,265 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741838_1014 (size=64) 2024-12-02T14:17:12,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741838_1014 (size=64) 2024-12-02T14:17:12,274 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:12,276 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,278 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 89fe4eabfa335dc8b7ff2a913620b9b9 columnFamilyName a 2024-12-02T14:17:12,278 DEBUG [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:12,279 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] regionserver.HStore(327): Store=89fe4eabfa335dc8b7ff2a913620b9b9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:12,279 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,280 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,281 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,282 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,282 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,287 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,291 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:12,292 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 89fe4eabfa335dc8b7ff2a913620b9b9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73373500, jitterRate=0.093350350856781}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:12,293 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 89fe4eabfa335dc8b7ff2a913620b9b9: Writing region info on filesystem at 1733149032274Initializing all the Stores at 1733149032275 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149032276 (+1 ms)Cleaning up temporary data from old regions at 1733149032282 (+6 ms)Region opened successfully at 1733149032293 (+11 ms) 2024-12-02T14:17:12,293 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 89fe4eabfa335dc8b7ff2a913620b9b9, disabling compactions & flushes 2024-12-02T14:17:12,293 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. 2024-12-02T14:17:12,293 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. 2024-12-02T14:17:12,294 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. after waiting 0 ms 2024-12-02T14:17:12,294 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. 2024-12-02T14:17:12,294 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. 
2024-12-02T14:17:12,294 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 89fe4eabfa335dc8b7ff2a913620b9b9: Waiting for close lock at 1733149032293Disabling compacts and flushes for region at 1733149032293Disabling writes for close at 1733149032294 (+1 ms)Writing region close event to WAL at 1733149032294Closed at 1733149032294 2024-12-02T14:17:12,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741837_1013 (size=95) 2024-12-02T14:17:12,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741837_1013 (size=95) 2024-12-02T14:17:12,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741837_1013 (size=95) 2024-12-02T14:17:12,307 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:12,307 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-72104879:(num 1733149032230) 2024-12-02T14:17:12,308 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-02T14:17:12,315 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,315 WARN [IPC Server handler 0 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,315 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741839_1015 (size=320) 2024-12-02T14:17:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741839_1015 (size=320) 2024-12-02T14:17:12,324 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-02T14:17:12,327 WARN [IPC Server handler 4 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,327 WARN [IPC Server handler 4 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,327 WARN [IPC Server handler 4 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741840_1016 (size=253) 2024-12-02T14:17:12,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741840_1016 (size=253) 2024-12-02T14:17:12,362 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1, size=320 (320bytes) 2024-12-02T14:17:12,363 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T14:17:12,363 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T14:17:12,363 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1 2024-12-02T14:17:12,369 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1 after 4ms 2024-12-02T14:17:12,373 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,374 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1 took 13ms 2024-12-02T14:17:12,383 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1 so closing down 2024-12-02T14:17:12,384 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:12,386 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-02T14:17:12,390 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp 
2024-12-02T14:17:12,391 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:12,392 WARN [IPC Server handler 3 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,392 WARN [IPC Server handler 3 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,392 WARN [IPC Server handler 3 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741841_1017 (size=320) 2024-12-02T14:17:12,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741841_1017 (size=320) 2024-12-02T14:17:12,401 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:12,403 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002 2024-12-02T14:17:12,407 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 28 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-02T14:17:12,407 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1, journal: Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1, size=320 (320bytes) at 1733149032362Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1 so closing down at 1733149032383 (+21 ms)Creating recovered edits writer 
path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp at 1733149032390 (+7 ms)3 split writer threads finished at 1733149032391 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733149032401 (+10 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002 at 1733149032403 (+2 ms)Processed 2 edits across 1 Regions in 28 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733149032407 (+4 ms) 2024-12-02T14:17:12,422 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2, size=253 (253bytes) 2024-12-02T14:17:12,422 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2 2024-12-02T14:17:12,423 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2 after 1ms 2024-12-02T14:17:12,426 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,427 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2 took 5ms 2024-12-02T14:17:12,438 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2 so closing down 2024-12-02T14:17:12,438 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:12,441 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-02T14:17:12,442 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002-wal-2.temp 2024-12-02T14:17:12,443 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:12,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741842_1018 (size=253) 2024-12-02T14:17:12,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741842_1018 (size=253) 2024-12-02T14:17:12,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is 
added to blk_1073741842_1018 (size=253) 2024-12-02T14:17:12,452 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:12,456 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,459 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-12-02T14:17:12,461 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-02T14:17:12,461 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2, journal: Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2, size=253 (253bytes) at 1733149032422Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2 so closing down at 1733149032438 (+16 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002-wal-2.temp at 1733149032443 (+5 ms)3 split writer threads finished at 1733149032443Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733149032452 (+9 ms)Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733149032461 (+9 ms) 2024-12-02T14:17:12,461 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:12,463 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:12,478 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal.1733149032464, exclude list is [], retry=0 2024-12-02T14:17:12,481 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:12,482 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:12,482 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:12,493 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal.1733149032464 2024-12-02T14:17:12,493 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:12,493 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 89fe4eabfa335dc8b7ff2a913620b9b9, NAME => 'testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:12,493 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:12,494 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,494 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,495 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,497 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 89fe4eabfa335dc8b7ff2a913620b9b9 columnFamilyName a 2024-12-02T14:17:12,497 DEBUG [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:12,498 INFO [StoreOpener-89fe4eabfa335dc8b7ff2a913620b9b9-1 {}] regionserver.HStore(327): Store=89fe4eabfa335dc8b7ff2a913620b9b9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:12,498 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,500 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,502 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,502 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,502 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,502 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,503 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,503 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,504 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG 
log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,504 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,504 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,504 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002 2024-12-02T14:17:12,507 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,513 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002 2024-12-02T14:17:12,516 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 89fe4eabfa335dc8b7ff2a913620b9b9 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-02T14:17:12,566 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/.tmp/a/a92664ad67674843a8ad9eadcc996244 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733149032307/Put/seqid=0 2024-12-02T14:17:12,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741844_1020 (size=5170) 2024-12-02T14:17:12,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741844_1020 (size=5170) 2024-12-02T14:17:12,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741844_1020 (size=5170) 2024-12-02T14:17:12,586 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/.tmp/a/a92664ad67674843a8ad9eadcc996244 2024-12-02T14:17:12,635 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/.tmp/a/a92664ad67674843a8ad9eadcc996244 as 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/a/a92664ad67674843a8ad9eadcc996244 2024-12-02T14:17:12,644 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/a/a92664ad67674843a8ad9eadcc996244, entries=2, sequenceid=2, filesize=5.0 K 2024-12-02T14:17:12,650 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 89fe4eabfa335dc8b7ff2a913620b9b9 in 131ms, sequenceid=2, compaction requested=false; wal=null 2024-12-02T14:17:12,651 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/0000000000000000002 2024-12-02T14:17:12,652 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,652 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,655 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 89fe4eabfa335dc8b7ff2a913620b9b9 2024-12-02T14:17:12,658 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/89fe4eabfa335dc8b7ff2a913620b9b9/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-02T14:17:12,660 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 89fe4eabfa335dc8b7ff2a913620b9b9; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73378524, jitterRate=0.0934252142906189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:12,661 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 89fe4eabfa335dc8b7ff2a913620b9b9: Writing region info on filesystem at 1733149032494Initializing all the Stores at 1733149032495 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149032495Obtaining lock to block concurrent updates at 1733149032516 (+21 ms)Preparing flush snapshotting stores in 89fe4eabfa335dc8b7ff2a913620b9b9 at 1733149032516Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733149032519 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733149032219.89fe4eabfa335dc8b7ff2a913620b9b9. 
at 1733149032519Flushing 89fe4eabfa335dc8b7ff2a913620b9b9/a: creating writer at 1733149032520 (+1 ms)Flushing 89fe4eabfa335dc8b7ff2a913620b9b9/a: appending metadata at 1733149032556 (+36 ms)Flushing 89fe4eabfa335dc8b7ff2a913620b9b9/a: closing flushed file at 1733149032559 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ced8883: reopening flushed file at 1733149032633 (+74 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 89fe4eabfa335dc8b7ff2a913620b9b9 in 131ms, sequenceid=2, compaction requested=false; wal=null at 1733149032650 (+17 ms)Cleaning up temporary data from old regions at 1733149032652 (+2 ms)Region opened successfully at 1733149032661 (+9 ms) 2024-12-02T14:17:12,686 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=368 (was 356) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:49808 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:39126 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:59938 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:38960 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:49850 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:42525/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=691 (was 595) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=313 (was 313), ProcessCount=11 (was 11), AvailableMemoryMB=4844 (was 4860) 2024-12-02T14:17:12,697 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=368, OpenFileDescriptor=691, MaxFileDescriptor=1048576, SystemLoadAverage=313, ProcessCount=11, AvailableMemoryMB=4843 2024-12-02T14:17:12,713 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:12,716 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:12,717 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:12,720 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-81384425, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-81384425, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:12,733 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-81384425/hregion-81384425.1733149032721, exclude list is [], retry=0 2024-12-02T14:17:12,735 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,735 WARN [IPC Server handler 2 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,735 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,737 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:12,737 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:12,740 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-81384425/hregion-81384425.1733149032721 2024-12-02T14:17:12,741 DEBUG 
[Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:12,741 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 08895f86f5085a7dbe7968262fd084fb, NAME => 'testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:12,744 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,744 WARN [IPC Server handler 0 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,745 WARN [IPC Server handler 0 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741846_1022 (size=64) 2024-12-02T14:17:12,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741846_1022 (size=64) 2024-12-02T14:17:12,752 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:12,753 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,755 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 08895f86f5085a7dbe7968262fd084fb columnFamilyName a 2024-12-02T14:17:12,755 DEBUG [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:12,756 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] regionserver.HStore(327): Store=08895f86f5085a7dbe7968262fd084fb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:12,756 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,757 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,758 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,758 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,759 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,761 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,765 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:12,765 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 08895f86f5085a7dbe7968262fd084fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71919063, jitterRate=0.0716775506734848}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:12,766 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 08895f86f5085a7dbe7968262fd084fb: Writing region info on filesystem at 1733149032752Initializing all the Stores at 1733149032753 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149032753Cleaning up temporary data from old regions at 1733149032759 (+6 ms)Region opened successfully at 1733149032766 (+7 ms) 2024-12-02T14:17:12,766 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 08895f86f5085a7dbe7968262fd084fb, disabling compactions & flushes 2024-12-02T14:17:12,766 INFO [Time-limited 
test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. 2024-12-02T14:17:12,766 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. 2024-12-02T14:17:12,766 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. after waiting 0 ms 2024-12-02T14:17:12,766 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. 2024-12-02T14:17:12,767 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. 2024-12-02T14:17:12,767 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 08895f86f5085a7dbe7968262fd084fb: Waiting for close lock at 1733149032766Disabling compacts and flushes for region at 1733149032766Disabling writes for close at 1733149032766Writing region close event to WAL at 1733149032766Closed at 1733149032767 (+1 ms) 2024-12-02T14:17:12,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741845_1021 (size=95) 2024-12-02T14:17:12,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741845_1021 (size=95) 2024-12-02T14:17:12,774 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:12,774 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-81384425:(num 1733149032721) 2024-12-02T14:17:12,775 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-02T14:17:12,778 WARN [IPC Server handler 1 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,778 WARN [IPC Server handler 1 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,778 WARN [IPC Server handler 1 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741847_1023 
(size=320) 2024-12-02T14:17:12,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741847_1023 (size=320) 2024-12-02T14:17:12,790 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-02T14:17:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741848_1024 (size=253) 2024-12-02T14:17:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741848_1024 (size=253) 2024-12-02T14:17:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741848_1024 (size=253) 2024-12-02T14:17:12,826 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2, size=253 (253bytes) 2024-12-02T14:17:12,826 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2 2024-12-02T14:17:12,827 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2 after 1ms 2024-12-02T14:17:12,830 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,830 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2 took 4ms 2024-12-02T14:17:12,832 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2 so closing down 2024-12-02T14:17:12,832 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:12,834 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-02T14:17:12,836 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp 2024-12-02T14:17:12,836 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:12,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741849_1025 (size=253) 2024-12-02T14:17:12,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741849_1025 (size=253) 2024-12-02T14:17:12,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741849_1025 (size=253) 2024-12-02T14:17:12,844 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:12,846 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 2024-12-02T14:17:12,846 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-02T14:17:12,846 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2, journal: Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2, size=253 (253bytes) at 1733149032826Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2 so closing down at 1733149032832 (+6 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp at 1733149032836 (+4 ms)3 split writer threads finished at 1733149032836Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733149032844 (+8 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 at 1733149032846 (+2 ms)Processed 1 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733149032846 2024-12-02T14:17:12,861 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1, size=320 (320bytes) 2024-12-02T14:17:12,861 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1 2024-12-02T14:17:12,865 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1 after 4ms 2024-12-02T14:17:12,869 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,869 INFO [Time-limited test {}] wal.WALSplitter(310): Open 
hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1 took 8ms 2024-12-02T14:17:12,875 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1 so closing down 2024-12-02T14:17:12,875 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:12,877 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-02T14:17:12,879 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp 2024-12-02T14:17:12,880 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:12,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741850_1026 (size=320) 2024-12-02T14:17:12,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741850_1026 (size=320) 2024-12-02T14:17:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741850_1026 (size=320) 2024-12-02T14:17:12,889 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:12,896 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,898 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. 
Deleting hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002, length=253 2024-12-02T14:17:12,904 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 2024-12-02T14:17:12,904 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 32 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-02T14:17:12,905 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1, journal: Splitting hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1, size=320 (320bytes) at 1733149032861Finishing writing output for hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1 so closing down at 1733149032875 (+14 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp at 1733149032879 (+4 ms)3 split writer threads finished at 1733149032880 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733149032889 (+9 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 at 1733149032904 (+15 ms)Processed 2 edits across 1 Regions in 32 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733149032905 (+1 ms) 2024-12-02T14:17:12,905 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:12,907 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:12,922 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal.1733149032907, exclude list is [], retry=0 2024-12-02T14:17:12,924 WARN [IPC Server handler 4 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:12,924 WARN [IPC Server handler 4 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:12,924 WARN [IPC Server handler 4 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:12,926 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:12,926 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:12,929 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal.1733149032907 2024-12-02T14:17:12,929 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:12,929 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 08895f86f5085a7dbe7968262fd084fb, NAME => 'testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:12,929 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:12,929 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,929 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,933 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,935 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 08895f86f5085a7dbe7968262fd084fb columnFamilyName a 2024-12-02T14:17:12,935 DEBUG [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:12,936 INFO [StoreOpener-08895f86f5085a7dbe7968262fd084fb-1 {}] regionserver.HStore(327): Store=08895f86f5085a7dbe7968262fd084fb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:12,936 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,937 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,939 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,940 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 2024-12-02T14:17:12,943 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:12,945 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 2024-12-02T14:17:12,945 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 08895f86f5085a7dbe7968262fd084fb 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-02T14:17:12,962 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/.tmp/a/f9b282f1c3774d6ab1692a3d8662e20b is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733149032774/Put/seqid=0 2024-12-02T14:17:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741852_1028 (size=5170) 2024-12-02T14:17:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741852_1028 (size=5170) 2024-12-02T14:17:12,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741852_1028 (size=5170) 2024-12-02T14:17:12,972 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/.tmp/a/f9b282f1c3774d6ab1692a3d8662e20b 2024-12-02T14:17:12,981 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/.tmp/a/f9b282f1c3774d6ab1692a3d8662e20b as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/a/f9b282f1c3774d6ab1692a3d8662e20b 2024-12-02T14:17:12,988 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/a/f9b282f1c3774d6ab1692a3d8662e20b, entries=2, sequenceid=2, filesize=5.0 K 2024-12-02T14:17:12,988 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 08895f86f5085a7dbe7968262fd084fb in 43ms, sequenceid=2, compaction requested=false; wal=null 2024-12-02T14:17:12,989 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/0000000000000000002 2024-12-02T14:17:12,989 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,989 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,992 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 08895f86f5085a7dbe7968262fd084fb 2024-12-02T14:17:12,994 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/08895f86f5085a7dbe7968262fd084fb/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-02T14:17:12,996 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 08895f86f5085a7dbe7968262fd084fb; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68974038, jitterRate=0.02779325842857361}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:13,000 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 08895f86f5085a7dbe7968262fd084fb: Writing region info on filesystem at 1733149032930Initializing all the Stores at 1733149032931 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149032931Obtaining lock to block concurrent updates at 1733149032945 (+14 ms)Preparing flush snapshotting stores in 08895f86f5085a7dbe7968262fd084fb at 1733149032945Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733149032945Flushing stores of testReplayEditsWrittenIntoWAL,,1733149032714.08895f86f5085a7dbe7968262fd084fb. 
at 1733149032945Flushing 08895f86f5085a7dbe7968262fd084fb/a: creating writer at 1733149032945Flushing 08895f86f5085a7dbe7968262fd084fb/a: appending metadata at 1733149032961 (+16 ms)Flushing 08895f86f5085a7dbe7968262fd084fb/a: closing flushed file at 1733149032961Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36ce6335: reopening flushed file at 1733149032979 (+18 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 08895f86f5085a7dbe7968262fd084fb in 43ms, sequenceid=2, compaction requested=false; wal=null at 1733149032988 (+9 ms)Cleaning up temporary data from old regions at 1733149032989 (+1 ms)Region opened successfully at 1733149033000 (+11 ms) 2024-12-02T14:17:13,021 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=375 (was 368) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:39218 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:49808 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:49906 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:38960 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:59972 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=767 (was 691) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=313 (was 313), ProcessCount=11 (was 11), AvailableMemoryMB=4838 (was 4843) 2024-12-02T14:17:13,033 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=375, OpenFileDescriptor=767, MaxFileDescriptor=1048576, SystemLoadAverage=313, ProcessCount=11, AvailableMemoryMB=4837 2024-12-02T14:17:13,049 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:13,052 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:13,053 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:13,056 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-89757476, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-89757476, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:13,069 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-89757476/hregion-89757476.1733149033056, exclude list is [], retry=0 2024-12-02T14:17:13,072 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:13,073 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for 
addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:13,073 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:13,076 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-89757476/hregion-89757476.1733149033056 2024-12-02T14:17:13,076 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:13,076 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => deb09085d2eea61e4a1b4ebc7f56ab30, NAME => 'testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:13,083 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:13,083 WARN [IPC Server handler 2 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:13,083 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:13,088 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741854_1030 (size=64) 2024-12-02T14:17:13,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741854_1030 (size=64) 2024-12-02T14:17:13,090 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:13,092 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,094 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName a 2024-12-02T14:17:13,094 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:13,095 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:13,095 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,097 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName b 2024-12-02T14:17:13,097 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:13,098 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:13,098 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,100 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName c 2024-12-02T14:17:13,100 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:13,101 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:13,101 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,102 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,102 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,104 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,104 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,104 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
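The region opened above was created with three column families (a, b, c), each with VERSIONS => '1' and 64 KB blocks, and the FlushLargeStoresPolicy message shows that no per-column-family flush lower bound was set on the table, so the policy falls back to memStoreFlushSize divided by the number of families (~42.7 MB). A minimal sketch of how an equivalent table descriptor could be built, assuming the standard HBase 2.x client API; the flush lower-bound value at the end is an arbitrary illustration, not something configured in this test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorSketch {
  // Builds a descriptor equivalent to the one logged above: families 'a', 'b', 'c',
  // single version, 64 KB blocks, no compression or encoding.
  public static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"));
    for (String family : new String[] { "a", "b", "c" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build());
    }
    // Hypothetical: an explicit table-level lower bound would stop FlushLargeStoresPolicy
    // from falling back to memStoreFlushSize / #families as reported in the log.
    builder.setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
        String.valueOf(16 * 1024 * 1024));
    return builder.build();
  }
}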
2024-12-02T14:17:13,106 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:13,109 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:13,110 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened deb09085d2eea61e4a1b4ebc7f56ab30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66897309, jitterRate=-0.003152415156364441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:13,110 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for deb09085d2eea61e4a1b4ebc7f56ab30: Writing region info on filesystem at 1733149033090Initializing all the Stores at 1733149033091 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149033091Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149033092 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149033092Cleaning up temporary data from old regions at 1733149033104 (+12 ms)Region opened successfully at 1733149033110 (+6 ms) 2024-12-02T14:17:13,110 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing deb09085d2eea61e4a1b4ebc7f56ab30, disabling compactions & flushes 2024-12-02T14:17:13,110 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:13,110 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:13,110 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. after waiting 0 ms 2024-12-02T14:17:13,110 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:13,111 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 
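The edits that get split and replayed further below are written directly into wal.1733149033121 by the test harness; in a normal deployment, equivalent WAL entries would originate from ordinary client writes. A hedged sketch using the standard HBase client API (the connection setup and value are assumptions; row, family and qualifier mirror the cell flushed earlier in this log, "testReplayEditsWrittenIntoWAL/a:1"):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WalEditSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes client config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testReplayEditsWrittenIntoWAL"))) {
      Put put = new Put(Bytes.toBytes("testReplayEditsWrittenIntoWAL"));
      put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
      table.put(put);  // the RegionServer appends this edit to its WAL before acknowledging
    }
  }
}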
2024-12-02T14:17:13,111 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for deb09085d2eea61e4a1b4ebc7f56ab30: Waiting for close lock at 1733149033110Disabling compacts and flushes for region at 1733149033110Disabling writes for close at 1733149033110Writing region close event to WAL at 1733149033111 (+1 ms)Closed at 1733149033111 2024-12-02T14:17:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741853_1029 (size=95) 2024-12-02T14:17:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741853_1029 (size=95) 2024-12-02T14:17:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741853_1029 (size=95) 2024-12-02T14:17:13,118 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:13,118 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-89757476:(num 1733149033056) 2024-12-02T14:17:13,119 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:13,121 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:13,134 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, exclude list is [], retry=0 2024-12-02T14:17:13,136 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:17:13,136 WARN [IPC Server handler 2 on default port 42525 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:17:13,136 WARN [IPC Server handler 2 on default port 42525 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:17:13,137 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:13,138 
DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:13,140 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 2024-12-02T14:17:13,141 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:13,358 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, size=0 (0bytes) 2024-12-02T14:17:13,358 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 might be still open, length is 0 2024-12-02T14:17:13,358 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 2024-12-02T14:17:13,359 WARN [IPC Server handler 3 on default port 42525 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-02T14:17:13,360 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 after 2ms 2024-12-02T14:17:13,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:39258 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44963:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39258 dst: /127.0.0.1:44963 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44963 remote=/127.0.0.1:39258]. Total timeout mills is 60000, 59861 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:13,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:49956 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49956 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:17:13,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741855_1032 (size=263633) 2024-12-02T14:17:13,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741855_1032 (size=263633) 2024-12-02T14:17:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741854_1030 (size=64) 2024-12-02T14:17:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:17:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741855_1032 (size=263633) 2024-12-02T14:17:17,341 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T14:17:17,361 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 after 4003ms 2024-12-02T14:17:17,365 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:17,366 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 took 4008ms 2024-12-02T14:17:17,372 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733149033121.temp 2024-12-02T14:17:17,380 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp 2024-12-02T14:17:17,402 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T14:17:17,517 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121; continuing. 
2024-12-02T14:17:17,518 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 so closing down 2024-12-02T14:17:17,518 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:17,519 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741856_1033 (size=263641) 2024-12-02T14:17:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741856_1033 (size=263641) 2024-12-02T14:17:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741856_1033 (size=263641) 2024-12-02T14:17:17,524 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp (wrote 3002 edits, skipped 0 edits in 81 ms) 2024-12-02T14:17:17,526 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 2024-12-02T14:17:17,526 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 159 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, size=0, length=0, corrupted=false, cancelled=false 2024-12-02T14:17:17,527 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, journal: Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, size=0 (0bytes) at 1733149033358Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp at 1733149037380 (+4022 ms)Split 1024 edits, skipped 0 edits. at 1733149037462 (+82 ms)Split 2048 edits, skipped 0 edits. 
at 1733149037492 (+30 ms)Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 so closing down at 1733149037518 (+26 ms)3 split writer threads finished at 1733149037519 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp (wrote 3002 edits, skipped 0 edits in 81 ms) at 1733149037524 (+5 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000000001-wal.1733149033121.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 at 1733149037526 (+2 ms)Processed 3002 edits across 1 Regions in 159 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121, size=0, length=0, corrupted=false, cancelled=false at 1733149037527 (+1 ms) 2024-12-02T14:17:17,529 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149033121 2024-12-02T14:17:17,530 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 2024-12-02T14:17:17,530 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:17,533 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:17,549 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149037533, exclude list is [], retry=0 2024-12-02T14:17:17,552 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:17,553 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:17,553 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:17,559 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149037533 2024-12-02T14:17:17,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:17,560 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:17,562 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,564 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName a 2024-12-02T14:17:17,564 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:17,565 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:17,565 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,566 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName b 2024-12-02T14:17:17,566 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:17,567 INFO 
[StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:17,567 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,568 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deb09085d2eea61e4a1b4ebc7f56ab30 columnFamilyName c 2024-12-02T14:17:17,568 DEBUG [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:17,569 INFO [StoreOpener-deb09085d2eea61e4a1b4ebc7f56ab30-1 {}] regionserver.HStore(327): Store=deb09085d2eea61e4a1b4ebc7f56ab30/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:17,569 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,570 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,573 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:17,574 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 2024-12-02T14:17:17,578 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:17,634 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-02T14:17:18,022 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing deb09085d2eea61e4a1b4ebc7f56ab30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-02T14:17:18,066 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/2c2c3e6f12d2475e9f113abbd7b7fc69 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733149033151/Put/seqid=0 2024-12-02T14:17:18,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741858_1035 (size=50463) 2024-12-02T14:17:18,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741858_1035 (size=50463) 2024-12-02T14:17:18,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741858_1035 (size=50463) 2024-12-02T14:17:18,084 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/2c2c3e6f12d2475e9f113abbd7b7fc69 2024-12-02T14:17:18,093 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/2c2c3e6f12d2475e9f113abbd7b7fc69 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/a/2c2c3e6f12d2475e9f113abbd7b7fc69 2024-12-02T14:17:18,101 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/a/2c2c3e6f12d2475e9f113abbd7b7fc69, entries=754, sequenceid=754, filesize=49.3 K 2024-12-02T14:17:18,102 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for deb09085d2eea61e4a1b4ebc7f56ab30 in 79ms, sequenceid=754, compaction requested=false; wal=null 2024-12-02T14:17:18,137 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-02T14:17:18,137 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing deb09085d2eea61e4a1b4ebc7f56ab30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-02T14:17:18,167 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/b75b7b52d83a419cbe781b6cdfa9eef4 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733149033210/Put/seqid=0 2024-12-02T14:17:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741859_1036 (size=20072) 2024-12-02T14:17:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741859_1036 (size=20072) 2024-12-02T14:17:18,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741859_1036 (size=20072) 2024-12-02T14:17:18,199 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/b75b7b52d83a419cbe781b6cdfa9eef4 2024-12-02T14:17:18,258 DEBUG [Time-limited test {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/ccce796ab10c498bb2280f047fbc1260 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733149033229/Put/seqid=0 2024-12-02T14:17:18,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741860_1037 (size=35835) 2024-12-02T14:17:18,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741860_1037 (size=35835) 2024-12-02T14:17:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741860_1037 (size=35835) 2024-12-02T14:17:18,291 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/ccce796ab10c498bb2280f047fbc1260 2024-12-02T14:17:18,306 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/a/b75b7b52d83a419cbe781b6cdfa9eef4 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/a/b75b7b52d83a419cbe781b6cdfa9eef4 2024-12-02T14:17:18,317 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/a/b75b7b52d83a419cbe781b6cdfa9eef4, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-02T14:17:18,321 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/ccce796ab10c498bb2280f047fbc1260 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/b/ccce796ab10c498bb2280f047fbc1260 2024-12-02T14:17:18,334 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/b/ccce796ab10c498bb2280f047fbc1260, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-02T14:17:18,334 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for deb09085d2eea61e4a1b4ebc7f56ab30 in 197ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-02T14:17:18,354 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-02T14:17:18,356 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing deb09085d2eea61e4a1b4ebc7f56ab30 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-02T14:17:18,367 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/eb738496f86747f194320b20ed77e480 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733149033244/Put/seqid=0 2024-12-02T14:17:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741861_1038 (size=35082) 
2024-12-02T14:17:18,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741861_1038 (size=35082) 2024-12-02T14:17:18,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741861_1038 (size=35082) 2024-12-02T14:17:18,380 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/eb738496f86747f194320b20ed77e480 2024-12-02T14:17:18,407 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/f12cadb7b846431eaf8d966c0d4b9b2c is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733149033275/Put/seqid=0 2024-12-02T14:17:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741862_1039 (size=20825) 2024-12-02T14:17:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741862_1039 (size=20825) 2024-12-02T14:17:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741862_1039 (size=20825) 2024-12-02T14:17:18,450 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/f12cadb7b846431eaf8d966c0d4b9b2c 2024-12-02T14:17:18,459 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/b/eb738496f86747f194320b20ed77e480 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/b/eb738496f86747f194320b20ed77e480 2024-12-02T14:17:18,466 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/b/eb738496f86747f194320b20ed77e480, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-02T14:17:18,467 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/f12cadb7b846431eaf8d966c0d4b9b2c as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/c/f12cadb7b846431eaf8d966c0d4b9b2c 2024-12-02T14:17:18,474 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/c/f12cadb7b846431eaf8d966c0d4b9b2c, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-02T14:17:18,475 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for deb09085d2eea61e4a1b4ebc7f56ab30 in 121ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-02T14:17:18,487 WARN [Time-limited test {}] regionserver.HRegion(5722): No 
family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1733149033322/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:18,490 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 2024-12-02T14:17:18,491 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-02T14:17:18,491 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing deb09085d2eea61e4a1b4ebc7f56ab30 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-02T14:17:18,501 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/c9149d2d4a8742a781d49a08ba61a166 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733149033282/Put/seqid=0 2024-12-02T14:17:18,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741863_1040 (size=50301) 2024-12-02T14:17:18,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741863_1040 (size=50301) 2024-12-02T14:17:18,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741863_1040 (size=50301) 2024-12-02T14:17:18,516 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/c9149d2d4a8742a781d49a08ba61a166 2024-12-02T14:17:18,524 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9149d2d4a8742a781d49a08ba61a166 2024-12-02T14:17:18,526 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/.tmp/c/c9149d2d4a8742a781d49a08ba61a166 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/c/c9149d2d4a8742a781d49a08ba61a166 2024-12-02T14:17:18,537 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9149d2d4a8742a781d49a08ba61a166 2024-12-02T14:17:18,538 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/c/c9149d2d4a8742a781d49a08ba61a166, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-02T14:17:18,538 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for deb09085d2eea61e4a1b4ebc7f56ab30 in 47ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-02T14:17:18,539 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/0000000000000003002 
2024-12-02T14:17:18,540 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:18,540 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:18,541 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:17:18,543 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for deb09085d2eea61e4a1b4ebc7f56ab30 2024-12-02T14:17:18,546 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenIntoWAL/deb09085d2eea61e4a1b4ebc7f56ab30/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-02T14:17:18,547 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened deb09085d2eea61e4a1b4ebc7f56ab30; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65856674, jitterRate=-0.018659085035324097}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:17:18,548 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for deb09085d2eea61e4a1b4ebc7f56ab30: Writing region info on filesystem at 1733149037560Initializing all the Stores at 1733149037562 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149037562Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149037562Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149037562Cleaning up temporary data from old regions at 1733149038540 (+978 ms)Region opened successfully at 1733149038547 (+7 ms) 2024-12-02T14:17:18,613 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing deb09085d2eea61e4a1b4ebc7f56ab30, disabling compactions & flushes 2024-12-02T14:17:18,613 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:18,613 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:18,613 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 
after waiting 0 ms 2024-12-02T14:17:18,613 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:18,618 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733149033050.deb09085d2eea61e4a1b4ebc7f56ab30. 2024-12-02T14:17:18,618 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for deb09085d2eea61e4a1b4ebc7f56ab30: Waiting for close lock at 1733149038613Disabling compacts and flushes for region at 1733149038613Disabling writes for close at 1733149038613Writing region close event to WAL at 1733149038618 (+5 ms)Closed at 1733149038618 2024-12-02T14:17:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741857_1034 (size=95) 2024-12-02T14:17:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741857_1034 (size=95) 2024-12-02T14:17:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741857_1034 (size=95) 2024-12-02T14:17:18,627 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:18,627 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733149037533) 2024-12-02T14:17:18,653 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=391 (was 375) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@215e4762[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1648371267_22 at /127.0.0.1:46218 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@30f1f55[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1648371267_22 at /127.0.0.1:46174 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:36635 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:42525 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@16f166ab[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1648371267_22 at /127.0.0.1:55438 [Waiting for operation #20] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1648371267_22 at /127.0.0.1:50408 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42525 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36635 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=845 (was 767) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=296 (was 313), ProcessCount=11 (was 11), AvailableMemoryMB=4728 (was 4837) 2024-12-02T14:17:18,666 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=391, OpenFileDescriptor=845, MaxFileDescriptor=1048576, SystemLoadAverage=296, ProcessCount=11, AvailableMemoryMB=4728 2024-12-02T14:17:18,691 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:18,694 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:18,695 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:18,698 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-96577862, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-96577862, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:18,712 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-96577862/hregion-96577862.1733149038698, exclude list is [], retry=0 2024-12-02T14:17:18,716 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:18,717 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:18,718 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:18,725 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-96577862/hregion-96577862.1733149038698 2024-12-02T14:17:18,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:18,726 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 6e147212289076c3135958f6b0c60e87, NAME => 'test2727,,1733149038692.6e147212289076c3135958f6b0c60e87.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:18,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741865_1042 (size=43) 2024-12-02T14:17:18,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741865_1042 (size=43) 2024-12-02T14:17:18,744 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733149038692.6e147212289076c3135958f6b0c60e87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:18,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741865_1042 (size=43) 2024-12-02T14:17:18,750 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,751 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName a 2024-12-02T14:17:18,751 DEBUG [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:18,752 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:18,752 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,754 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName b 2024-12-02T14:17:18,754 DEBUG [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:18,755 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:18,755 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,757 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName c 2024-12-02T14:17:18,757 DEBUG [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:18,757 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:18,758 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,758 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,759 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,760 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,760 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,761 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:18,762 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:18,766 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:18,766 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 6e147212289076c3135958f6b0c60e87; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61256394, jitterRate=-0.08720859885215759}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:18,768 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 6e147212289076c3135958f6b0c60e87: Writing region info on filesystem at 1733149038744Initializing all the Stores at 1733149038749 (+5 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149038749Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149038749Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149038749Cleaning up temporary data from old regions at 1733149038760 (+11 ms)Region opened successfully at 1733149038768 (+8 ms) 2024-12-02T14:17:18,768 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 6e147212289076c3135958f6b0c60e87, disabling compactions & flushes 2024-12-02T14:17:18,768 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:18,768 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:18,768 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. after waiting 0 ms 2024-12-02T14:17:18,768 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:18,769 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 
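
For reference, the 'test2727' schema created above (three column families a, b and c, each with VERSIONS => '1' and otherwise default attributes) could be expressed with the public HBase 2.x client API roughly as in the sketch below. This is an illustrative snippet, not the test's source code; the class name Test2727Schema is made up, and only the max-versions setting is shown explicitly. The comment also spells out the FlushLargeStoresPolicy fallback visible in the log: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the lower bound becomes the memstore flush size divided by the number of families, 134217728 / 3 = 44739242 bytes (~42.7 MB), which is exactly the flushSizeLowerBound logged.

// Illustrative sketch only (hypothetical class, not the test's code): a table descriptor
// equivalent to the 'test2727' schema logged above, built with the HBase 2.x client API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class Test2727Schema {
  public static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("test2727"));
    for (String family : new String[] { "a", "b", "c" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the log; other attributes keep defaults
              .build());
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(build());
    // FlushLargeStoresPolicy fallback seen in the log: memstore flush size / #families.
    System.out.println(134217728L / 3);  // 44739242 bytes ~= 42.7 MB (flushSizeLowerBound)
  }
}
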
2024-12-02T14:17:18,769 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 6e147212289076c3135958f6b0c60e87: Waiting for close lock at 1733149038768Disabling compacts and flushes for region at 1733149038768Disabling writes for close at 1733149038768Writing region close event to WAL at 1733149038769 (+1 ms)Closed at 1733149038769 2024-12-02T14:17:18,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741864_1041 (size=95) 2024-12-02T14:17:18,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741864_1041 (size=95) 2024-12-02T14:17:18,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741864_1041 (size=95) 2024-12-02T14:17:18,776 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:18,776 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-96577862:(num 1733149038698) 2024-12-02T14:17:18,776 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:18,779 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:18,798 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, exclude list is [], retry=0 2024-12-02T14:17:18,801 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:18,801 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:18,802 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:18,804 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 2024-12-02T14:17:18,805 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:19,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741866_1043 (size=263359) 2024-12-02T14:17:19,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741866_1043 (size=263359) 2024-12-02T14:17:19,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741866_1043 
(size=263359) 2024-12-02T14:17:19,045 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, size=257.2 K (263359bytes) 2024-12-02T14:17:19,045 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 2024-12-02T14:17:19,046 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 after 1ms 2024-12-02T14:17:19,049 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:19,050 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 took 5ms 2024-12-02T14:17:19,059 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733149038779.temp 2024-12-02T14:17:19,067 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp 2024-12-02T14:17:19,125 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 so closing down 2024-12-02T14:17:19,125 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:19,126 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:19,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741867_1044 (size=263359) 2024-12-02T14:17:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741867_1044 (size=263359) 2024-12-02T14:17:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741867_1044 (size=263359) 2024-12-02T14:17:19,133 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp (wrote 3000 edits, skipped 0 edits in 46 ms) 2024-12-02T14:17:19,135 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp to hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 2024-12-02T14:17:19,135 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 84 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, size=257.2 K, length=263359, corrupted=false, cancelled=false 
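
The split above also shows the recovered-edits naming pattern in action: the writer is opened as 0000000000000000001-wal.1733149038779.temp (the first sequence id zero-padded to 19 digits plus the source WAL name) and, after all 3000 edits are written, renamed to 0000000000000003000, i.e. just the highest sequence id it contains. A tiny sketch of that formatting follows; it is a hypothetical helper for illustration, not HBase's internal code, and the class and method names are made up.

// Hedged illustration of the recovered-edits naming observed in the log above.
public class RecoveredEditsNames {
  static String tempName(long firstSeqId, String walName) {
    // first sequence id, zero-padded to 19 digits, then the WAL file name, then ".temp"
    return String.format("%019d-%s.temp", firstSeqId, walName);
  }

  static String finalName(long maxSeqId) {
    // once the split finishes, the file is renamed to just the highest sequence id written
    return String.format("%019d", maxSeqId);
  }

  public static void main(String[] args) {
    // Matches the log: 0000000000000000001-wal.1733149038779.temp -> 0000000000000003000
    System.out.println(tempName(1L, "wal.1733149038779"));
    System.out.println(finalName(3000L));
  }
}

The second split later in the log follows the same scheme: 0000000000000003001-wal.1733149039143.temp is renamed to 0000000000000006000.
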
2024-12-02T14:17:19,135 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, journal: Splitting hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, size=257.2 K (263359bytes) at 1733149039045Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp at 1733149039067 (+22 ms)Split 1024 edits, skipped 0 edits. at 1733149039077 (+10 ms)Split 2048 edits, skipped 0 edits. at 1733149039103 (+26 ms)Finishing writing output for hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 so closing down at 1733149039125 (+22 ms)3 split writer threads finished at 1733149039126 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp (wrote 3000 edits, skipped 0 edits in 46 ms) at 1733149039133 (+7 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000000001-wal.1733149038779.temp to hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 at 1733149039135 (+2 ms)Processed 3000 edits across 1 Regions in 84 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1733149039135 2024-12-02T14:17:19,138 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149038779 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149038779 2024-12-02T14:17:19,139 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 2024-12-02T14:17:19,139 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:19,142 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:19,166 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, exclude list is [], retry=0 2024-12-02T14:17:19,170 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:19,170 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:19,171 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:19,173 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 2024-12-02T14:17:19,175 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:19,237 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T14:17:19,238 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:19,240 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:17:19,240 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T14:17:19,241 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T14:17:19,241 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:19,242 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-02T14:17:19,242 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:19,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741868_1045 (size=263486) 2024-12-02T14:17:19,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741868_1045 (size=263486) 2024-12-02T14:17:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741868_1045 (size=263486) 2024-12-02T14:17:19,441 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, size=257.3 K (263486bytes) 2024-12-02T14:17:19,441 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 2024-12-02T14:17:19,442 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 after 1ms 2024-12-02T14:17:19,445 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:19,447 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 took 6ms 2024-12-02T14:17:19,452 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733149039143.temp 2024-12-02T14:17:19,453 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp 2024-12-02T14:17:19,501 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 so closing down 2024-12-02T14:17:19,501 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:19,501 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:19,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741869_1046 (size=263486) 2024-12-02T14:17:19,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741869_1046 (size=263486) 2024-12-02T14:17:19,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741869_1046 (size=263486) 2024-12-02T14:17:19,511 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp (wrote 3000 edits, skipped 0 edits in 39 ms) 2024-12-02T14:17:19,513 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp to hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 2024-12-02T14:17:19,513 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 66 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-12-02T14:17:19,513 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, journal: Splitting hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, size=257.3 K (263486bytes) at 1733149039441Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp at 1733149039453 (+12 ms)Split 1024 edits, skipped 0 edits. at 1733149039468 (+15 ms)Split 2048 edits, skipped 0 edits. 
at 1733149039485 (+17 ms)Finishing writing output for hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 so closing down at 1733149039501 (+16 ms)3 split writer threads finished at 1733149039501Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp (wrote 3000 edits, skipped 0 edits in 39 ms) at 1733149039511 (+10 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003001-wal.1733149039143.temp to hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 at 1733149039513 (+2 ms)Processed 3000 edits across 1 Regions in 66 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1733149039513 2024-12-02T14:17:19,516 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039143 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149039143 2024-12-02T14:17:19,517 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 2024-12-02T14:17:19,517 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:19,521 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/test2727-manual,16010,1733149038690, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:19,542 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039522, exclude list is [], retry=0 2024-12-02T14:17:19,546 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:19,547 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:19,547 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:19,550 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733149038690/wal.1733149039522 2024-12-02T14:17:19,551 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:19,551 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 6e147212289076c3135958f6b0c60e87, NAME => 
'test2727,,1733149038692.6e147212289076c3135958f6b0c60e87.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:19,551 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733149038692.6e147212289076c3135958f6b0c60e87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:19,551 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,552 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,554 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,555 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName a 2024-12-02T14:17:19,555 DEBUG [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:19,556 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:19,556 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,557 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName b 2024-12-02T14:17:19,557 DEBUG 
[StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:19,558 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:19,558 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,559 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e147212289076c3135958f6b0c60e87 columnFamilyName c 2024-12-02T14:17:19,559 DEBUG [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:19,560 INFO [StoreOpener-6e147212289076c3135958f6b0c60e87-1 {}] regionserver.HStore(327): Store=6e147212289076c3135958f6b0c60e87/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:19,560 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,561 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,564 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,566 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 2024-12-02T14:17:19,569 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:19,623 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, 
path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 2024-12-02T14:17:19,625 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 2024-12-02T14:17:19,627 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:19,665 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 2024-12-02T14:17:19,665 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 6e147212289076c3135958f6b0c60e87 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-02T14:17:19,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/a/18e9b1283ab24bae93820273c6b918e2 is 41, key is test2727/a:100/1733149039182/Put/seqid=0 2024-12-02T14:17:19,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741871_1048 (size=84227) 2024-12-02T14:17:19,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741871_1048 (size=84227) 2024-12-02T14:17:19,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741871_1048 (size=84227) 2024-12-02T14:17:19,697 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/a/18e9b1283ab24bae93820273c6b918e2 2024-12-02T14:17:19,735 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/b/b462b614eeb64e6ead72a72f104629cd is 41, key is test2727/b:100/1733149039254/Put/seqid=0 2024-12-02T14:17:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741872_1049 (size=84609) 2024-12-02T14:17:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741872_1049 (size=84609) 2024-12-02T14:17:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741872_1049 (size=84609) 2024-12-02T14:17:19,742 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/b/b462b614eeb64e6ead72a72f104629cd 2024-12-02T14:17:19,768 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/c/a400dab1bdad4539aa9900335dcf6da6 is 41, key is test2727/c:100/1733149039356/Put/seqid=0 2024-12-02T14:17:19,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741873_1050 (size=84609) 2024-12-02T14:17:19,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741873_1050 (size=84609) 2024-12-02T14:17:19,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741873_1050 (size=84609) 2024-12-02T14:17:19,777 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/c/a400dab1bdad4539aa9900335dcf6da6 2024-12-02T14:17:19,784 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/a/18e9b1283ab24bae93820273c6b918e2 as hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/a/18e9b1283ab24bae93820273c6b918e2 2024-12-02T14:17:19,790 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/a/18e9b1283ab24bae93820273c6b918e2, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-02T14:17:19,791 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/b/b462b614eeb64e6ead72a72f104629cd as hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/b/b462b614eeb64e6ead72a72f104629cd 2024-12-02T14:17:19,799 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/b/b462b614eeb64e6ead72a72f104629cd, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-02T14:17:19,800 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/.tmp/c/a400dab1bdad4539aa9900335dcf6da6 as hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/c/a400dab1bdad4539aa9900335dcf6da6 2024-12-02T14:17:19,806 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/c/a400dab1bdad4539aa9900335dcf6da6, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-02T14:17:19,806 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 6e147212289076c3135958f6b0c60e87 in 141ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-02T14:17:19,807 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000003000 2024-12-02T14:17:19,807 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/0000000000000006000 2024-12-02T14:17:19,808 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,809 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,809 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:19,811 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 6e147212289076c3135958f6b0c60e87 2024-12-02T14:17:19,813 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/test2727/6e147212289076c3135958f6b0c60e87/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-02T14:17:19,814 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 6e147212289076c3135958f6b0c60e87; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64499611, jitterRate=-0.03888089954853058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:19,815 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 6e147212289076c3135958f6b0c60e87: Writing region info on filesystem at 1733149039552Initializing all the Stores at 1733149039553 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149039553Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149039553Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149039553Obtaining lock to block concurrent updates at 1733149039665 (+112 ms)Preparing flush snapshotting stores in 6e147212289076c3135958f6b0c60e87 at 1733149039665Finished memstore snapshotting test2727,,1733149038692.6e147212289076c3135958f6b0c60e87., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733149039666 (+1 ms)Flushing stores of test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 
at 1733149039666Flushing 6e147212289076c3135958f6b0c60e87/a: creating writer at 1733149039666Flushing 6e147212289076c3135958f6b0c60e87/a: appending metadata at 1733149039686 (+20 ms)Flushing 6e147212289076c3135958f6b0c60e87/a: closing flushed file at 1733149039686Flushing 6e147212289076c3135958f6b0c60e87/b: creating writer at 1733149039705 (+19 ms)Flushing 6e147212289076c3135958f6b0c60e87/b: appending metadata at 1733149039733 (+28 ms)Flushing 6e147212289076c3135958f6b0c60e87/b: closing flushed file at 1733149039733Flushing 6e147212289076c3135958f6b0c60e87/c: creating writer at 1733149039749 (+16 ms)Flushing 6e147212289076c3135958f6b0c60e87/c: appending metadata at 1733149039767 (+18 ms)Flushing 6e147212289076c3135958f6b0c60e87/c: closing flushed file at 1733149039767Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@92d3b26: reopening flushed file at 1733149039783 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a162adb: reopening flushed file at 1733149039790 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c5965b6: reopening flushed file at 1733149039799 (+9 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 6e147212289076c3135958f6b0c60e87 in 141ms, sequenceid=6000, compaction requested=false; wal=null at 1733149039806 (+7 ms)Cleaning up temporary data from old regions at 1733149039809 (+3 ms)Region opened successfully at 1733149039815 (+6 ms) 2024-12-02T14:17:19,816 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-02T14:17:19,817 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 6e147212289076c3135958f6b0c60e87, disabling compactions & flushes 2024-12-02T14:17:19,817 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:19,817 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:19,817 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. after waiting 0 ms 2024-12-02T14:17:19,817 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 2024-12-02T14:17:19,818 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733149038692.6e147212289076c3135958f6b0c60e87. 
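
The replay and flush above are internally consistent: after applying 3000 edits from each of the two recovered-edits files (sequence ids 1-3000 and 3001-6000), the region flushed 6000 cells / 220680 bytes evenly across the three families and reopened with next sequenceid=6001 (maxSequenceIdInLog 6000 + 1, recorded in the 6000.seqid marker). A minimal stand-alone check of that arithmetic is sketched below; it is plain Java, not HBase code, and the class name FlushMath is made up.

// Quick arithmetic check of the flush summary logged above.
public class FlushMath {
  public static void main(String[] args) {
    long totalDataBytes = 220680L;   // "dataSize ~215.51 KB/220680" from the log
    long totalCells = 6000L;         // getCellsCount=6000
    int families = 3;                // column families a, b and c

    System.out.printf("per-family bytes: %d (%.2f KB)%n",
        totalDataBytes / families, (totalDataBytes / (double) families) / 1024); // 73560 -> 71.84 KB
    System.out.println("per-family cells: " + totalCells / families);            // 2000 entries each
    System.out.printf("total: %.2f KB%n", totalDataBytes / 1024.0);              // ~215.51 KB
    System.out.println("next sequenceid: " + (6000L + 1));                       // 6001, as logged
  }
}
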
2024-12-02T14:17:19,818 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 6e147212289076c3135958f6b0c60e87: Waiting for close lock at 1733149039816Disabling compacts and flushes for region at 1733149039816Disabling writes for close at 1733149039817 (+1 ms)Writing region close event to WAL at 1733149039818 (+1 ms)Closed at 1733149039818 2024-12-02T14:17:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741870_1047 (size=95) 2024-12-02T14:17:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741870_1047 (size=95) 2024-12-02T14:17:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741870_1047 (size=95) 2024-12-02T14:17:19,824 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:19,824 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733149039522) 2024-12-02T14:17:19,840 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=395 (was 391) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:46326 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:55528 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:50530 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=907 (was 845) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 296) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4561 (was 4728) 2024-12-02T14:17:19,852 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=395, OpenFileDescriptor=907, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=4561 2024-12-02T14:17:19,867 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:19,874 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:19,875 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733149039875 2024-12-02T14:17:19,882 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 2024-12-02T14:17:19,884 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:19,885 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c9227c69cce9e3e5e98a873ccc8dfb3, NAME => 'testSequentialEditLogSeqNum,,1733149039868.7c9227c69cce9e3e5e98a873ccc8dfb3.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:19,886 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733149039868.7c9227c69cce9e3e5e98a873ccc8dfb3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:19,886 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,886 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,887 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3 doesn't exist for region: 7c9227c69cce9e3e5e98a873ccc8dfb3 on table testSequentialEditLogSeqNum 2024-12-02T14:17:19,887 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 7c9227c69cce9e3e5e98a873ccc8dfb3 on table testSequentialEditLogSeqNum 2024-12-02T14:17:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741875_1052 (size=62) 2024-12-02T14:17:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741875_1052 (size=62) 2024-12-02T14:17:19,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741875_1052 (size=62) 2024-12-02T14:17:19,898 INFO [StoreOpener-7c9227c69cce9e3e5e98a873ccc8dfb3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,899 INFO [StoreOpener-7c9227c69cce9e3e5e98a873ccc8dfb3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c9227c69cce9e3e5e98a873ccc8dfb3 columnFamilyName a 2024-12-02T14:17:19,900 DEBUG [StoreOpener-7c9227c69cce9e3e5e98a873ccc8dfb3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:19,900 INFO [StoreOpener-7c9227c69cce9e3e5e98a873ccc8dfb3-1 {}] regionserver.HStore(327): Store=7c9227c69cce9e3e5e98a873ccc8dfb3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:19,900 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,901 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,901 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,902 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,902 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,904 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7c9227c69cce9e3e5e98a873ccc8dfb3 2024-12-02T14:17:19,906 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:19,907 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7c9227c69cce9e3e5e98a873ccc8dfb3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60519029, jitterRate=-0.09819619357585907}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:19,907 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7c9227c69cce9e3e5e98a873ccc8dfb3: Writing region info on filesystem at 1733149039886Initializing all the Stores at 1733149039897 (+11 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149039897Cleaning up temporary data from old regions at 1733149039902 (+5 ms)Region opened successfully at 1733149039907 (+5 ms) 2024-12-02T14:17:19,921 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7c9227c69cce9e3e5e98a873ccc8dfb3 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-02T14:17:19,949 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/.tmp/a/977b5b0098334543ae73122dde7e2a9d is 81, key is testSequentialEditLogSeqNum/a:x0/1733149039907/Put/seqid=0 2024-12-02T14:17:19,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741876_1053 (size=5833) 2024-12-02T14:17:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741876_1053 (size=5833) 2024-12-02T14:17:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741876_1053 (size=5833) 2024-12-02T14:17:19,957 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/.tmp/a/977b5b0098334543ae73122dde7e2a9d 2024-12-02T14:17:19,964 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/.tmp/a/977b5b0098334543ae73122dde7e2a9d as hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/a/977b5b0098334543ae73122dde7e2a9d 2024-12-02T14:17:19,970 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/a/977b5b0098334543ae73122dde7e2a9d, entries=10, sequenceid=13, filesize=5.7 K 2024-12-02T14:17:19,972 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 7c9227c69cce9e3e5e98a873ccc8dfb3 in 51ms, sequenceid=13, compaction requested=false 2024-12-02T14:17:19,972 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7c9227c69cce9e3e5e98a873ccc8dfb3: 2024-12-02T14:17:19,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:17:19,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:17:19,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:17:19,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:17:19,985 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:17:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741874_1051 (size=1843) 2024-12-02T14:17:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741874_1051 (size=1843) 2024-12-02T14:17:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741874_1051 (size=1843) 
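The flush above drains roughly 770 B of memstore data for column family 'a' into a new HFile under the store's .tmp directory and then commits it into the store at sequenceid=13. The test drives the HRegion directly, but the equivalent client-side operations can be sketched as below; this is only an illustration, not the test's code. The class name FlushSketch is hypothetical, the table name, family and x0..x9 qualifiers are taken from the log lines above, and a reachable cluster with an existing table is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testSequentialEditLogSeqNum");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Ten small puts against family 'a', mirroring the ~770 B of
      // memstore data reported in the flush above.
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("testSequentialEditLogSeqNum"));
        p.addColumn(Bytes.toBytes("a"), Bytes.toBytes("x" + i), Bytes.toBytes(i));
        table.put(p);
      }
      // Ask the server to write the memstore out as an HFile under the
      // store's .tmp directory and commit it, as the log records above.
      admin.flush(tn);
    }
  }
}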
2024-12-02T14:17:20,005 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875, size=1.8 K (1843bytes) 2024-12-02T14:17:20,006 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 2024-12-02T14:17:20,006 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 after 0ms 2024-12-02T14:17:20,010 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:20,010 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 took 5ms 2024-12-02T14:17:20,013 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 so closing down 2024-12-02T14:17:20,013 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:20,015 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733149039875.temp 2024-12-02T14:17:20,018 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp 2024-12-02T14:17:20,019 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:20,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741877_1054 (size=1477) 2024-12-02T14:17:20,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741877_1054 (size=1477) 2024-12-02T14:17:20,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741877_1054 (size=1477) 2024-12-02T14:17:20,040 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:20,042 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp to hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000020 2024-12-02T14:17:20,042 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 31 ms; skipped=2; 
WAL=hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875, size=1.8 K, length=1843, corrupted=false, cancelled=false 2024-12-02T14:17:20,042 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875, journal: Splitting hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875, size=1.8 K (1843bytes) at 1733149040006Finishing writing output for hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875 so closing down at 1733149040013 (+7 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp at 1733149040018 (+5 ms)3 split writer threads finished at 1733149040019 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733149040040 (+21 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000003-wal.1733149039875.temp to hdfs://localhost:42525/hbase/data/default/testSequentialEditLogSeqNum/7c9227c69cce9e3e5e98a873ccc8dfb3/recovered.edits/0000000000000000020 at 1733149040042 (+2 ms)Processed 17 edits across 1 Regions in 31 ms; skipped=2; WAL=hdfs://localhost:42525/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733149039867/wal.1733149039875, size=1.8 K, length=1843, corrupted=false, cancelled=false at 1733149040042 2024-12-02T14:17:20,059 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=400 (was 395) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:55528 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=943 (was 907) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=4555 (was 4561) 2024-12-02T14:17:20,073 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=400, OpenFileDescriptor=943, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=4554 2024-12-02T14:17:20,090 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:20,092 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:20,093 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:20,096 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-15882966, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-15882966, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:20,109 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-15882966/hregion-15882966.1733149040096, exclude list is [], retry=0 2024-12-02T14:17:20,113 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:20,113 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:20,114 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:20,155 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-15882966/hregion-15882966.1733149040096 2024-12-02T14:17:20,155 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:20,156 INFO [Time-limited test {}] 
regionserver.HRegion(7572): creating {ENCODED => a076ac423b6066c428e680b62e31c488, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:20,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741879_1056 (size=70) 2024-12-02T14:17:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741879_1056 (size=70) 2024-12-02T14:17:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741879_1056 (size=70) 2024-12-02T14:17:20,170 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:20,172 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,173 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName a 2024-12-02T14:17:20,173 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,174 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] 
regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:20,174 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,176 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName b 2024-12-02T14:17:20,176 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,176 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:20,176 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,178 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName c 2024-12-02T14:17:20,178 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,179 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-02T14:17:20,179 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,180 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,180 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,182 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,182 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,183 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:20,184 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,187 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:20,188 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a076ac423b6066c428e680b62e31c488; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59821402, jitterRate=-0.10859164595603943}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:20,189 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a076ac423b6066c428e680b62e31c488: Writing region info on filesystem at 1733149040170Initializing all the Stores at 1733149040171 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040171Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040171Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040171Cleaning up temporary data from old regions at 1733149040182 (+11 ms)Region opened successfully at 1733149040189 (+7 ms) 2024-12-02T14:17:20,189 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing a076ac423b6066c428e680b62e31c488, disabling compactions & flushes 
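The region opened above was created from a table descriptor with three column families ('a', 'b', 'c'), each with VERSIONS => '1' and otherwise default settings, as shown in the creating {ENCODED => ...} entry earlier. A minimal sketch of building an equivalent descriptor through the public HBase API follows; DescriptorSketch and bulkLoadTestDescriptor are hypothetical names, and every attribute other than the family names and max versions is left at its default.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor bulkLoadTestDescriptor() {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"));
    // Families 'a', 'b' and 'c' with a single version kept, matching the
    // descriptor printed when the region was created; all other column
    // family attributes stay at their defaults.
    for (String family : new String[] {"a", "b", "c"}) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .build();
      builder.setColumnFamily(cfd);
    }
    return builder.build();
  }
}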
2024-12-02T14:17:20,189 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:20,190 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:20,190 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. after waiting 0 ms 2024-12-02T14:17:20,190 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:20,190 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:20,190 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for a076ac423b6066c428e680b62e31c488: Waiting for close lock at 1733149040189Disabling compacts and flushes for region at 1733149040189Disabling writes for close at 1733149040190 (+1 ms)Writing region close event to WAL at 1733149040190Closed at 1733149040190 2024-12-02T14:17:20,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741878_1055 (size=95) 2024-12-02T14:17:20,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741878_1055 (size=95) 2024-12-02T14:17:20,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741878_1055 (size=95) 2024-12-02T14:17:20,200 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-15882966/hregion-15882966.1733149040096 not finished, retry = 0 2024-12-02T14:17:20,303 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:20,304 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-15882966:(num 1733149040096) 2024-12-02T14:17:20,304 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:20,306 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:20,327 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, exclude list is [], retry=0 2024-12-02T14:17:20,330 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:20,331 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:20,331 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:20,333 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 2024-12-02T14:17:20,334 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:20,334 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => a076ac423b6066c428e680b62e31c488, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:20,334 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:20,334 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,334 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,336 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,337 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName a 2024-12-02T14:17:20,337 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,337 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:20,337 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,338 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName b 2024-12-02T14:17:20,338 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,339 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:20,339 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,340 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName c 2024-12-02T14:17:20,340 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:20,340 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:20,340 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,341 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,342 DEBUG [Time-limited 
test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,343 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,343 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,344 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:20,345 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:20,346 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a076ac423b6066c428e680b62e31c488; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63392546, jitterRate=-0.055377453565597534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:20,347 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a076ac423b6066c428e680b62e31c488: Writing region info on filesystem at 1733149040334Initializing all the Stores at 1733149040335 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040335Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040335Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149040335Cleaning up temporary data from old regions at 1733149040343 (+8 ms)Region opened successfully at 1733149040347 (+4 ms) 2024-12-02T14:17:20,351 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733149040350/Put/seqid=0 2024-12-02T14:17:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741881_1058 (size=4826) 2024-12-02T14:17:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741881_1058 (size=4826) 2024-12-02T14:17:20,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741881_1058 (size=4826) 2024-12-02T14:17:20,362 INFO [Time-limited test {}] regionserver.HStore(614): 
Validating hfile at hdfs://localhost:42525/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in a076ac423b6066c428e680b62e31c488/a 2024-12-02T14:17:20,369 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-02T14:17:20,369 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T14:17:20,369 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a076ac423b6066c428e680b62e31c488: 2024-12-02T14:17:20,371 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_ 2024-12-02T14:17:20,371 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42525/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into a076ac423b6066c428e680b62e31c488/a as hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_ - updating store file list. 2024-12-02T14:17:20,376 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:20,377 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_ into a076ac423b6066c428e680b62e31c488/a 2024-12-02T14:17:20,377 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42525/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into a076ac423b6066c428e680b62e31c488/a (new location: hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_) 2024-12-02T14:17:20,419 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, size=0 (0bytes) 2024-12-02T14:17:20,419 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 might be still open, length is 0 2024-12-02T14:17:20,419 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 2024-12-02T14:17:20,420 WARN [IPC Server handler 1 on default port 42525 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 has not been closed. Lease recovery is in progress. 
RecoveryId = 1059 for block blk_1073741880_1057 2024-12-02T14:17:20,420 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 after 1ms 2024-12-02T14:17:22,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:55646 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55646 dst: /127.0.0.1:45771 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45771 remote=/127.0.0.1:55646]. Total timeout mills is 60000, 57682 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:22,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:50614 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:39417:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50614 dst: /127.0.0.1:39417 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:22,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:46408 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44963:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46408 dst: /127.0.0.1:44963 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:17:22,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741880_1059 (size=474) 2024-12-02T14:17:22,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741880_1059 (size=474) 2024-12-02T14:17:24,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 after 4002ms 2024-12-02T14:17:24,424 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:24,425 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 took 4006ms 2024-12-02T14:17:24,427 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306; continuing. 2024-12-02T14:17:24,427 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 so closing down 2024-12-02T14:17:24,427 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:24,429 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733149040306.temp 2024-12-02T14:17:24,430 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp 2024-12-02T14:17:24,431 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:24,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741882_1060 (size=259) 2024-12-02T14:17:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741882_1060 (size=259) 2024-12-02T14:17:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741882_1060 (size=259) 2024-12-02T14:17:24,442 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:24,443 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp to 
hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 2024-12-02T14:17:24,443 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, size=0, length=0, corrupted=false, cancelled=false 2024-12-02T14:17:24,443 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, journal: Splitting hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, size=0 (0bytes) at 1733149040419Finishing writing output for hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 so closing down at 1733149044427 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp at 1733149044430 (+3 ms)3 split writer threads finished at 1733149044431 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733149044442 (+11 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005-wal.1733149040306.temp to hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 at 1733149044443 (+1 ms)Processed 2 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306, size=0, length=0, corrupted=false, cancelled=false at 1733149044443 2024-12-02T14:17:24,445 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149040306 2024-12-02T14:17:24,446 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 2024-12-02T14:17:24,446 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:24,449 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:24,462 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149044449, exclude list is [], retry=0 2024-12-02T14:17:24,465 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 
/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:24,465 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:24,465 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:24,467 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149044449 2024-12-02T14:17:24,467 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:24,468 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => a076ac423b6066c428e680b62e31c488, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:24,468 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:24,468 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,468 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,469 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,470 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName a 2024-12-02T14:17:24,470 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:24,476 DEBUG [StoreFileOpener-a076ac423b6066c428e680b62e31c488-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_: NONE, but ROW 
specified in column family configuration 2024-12-02T14:17:24,477 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/00f3ed9cff394d1d8a0892ee7955157d_SeqId_3_ 2024-12-02T14:17:24,477 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:24,477 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,478 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName b 2024-12-02T14:17:24,478 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:24,478 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:24,479 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,479 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a076ac423b6066c428e680b62e31c488 columnFamilyName c 2024-12-02T14:17:24,479 DEBUG [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:24,480 INFO [StoreOpener-a076ac423b6066c428e680b62e31c488-1 {}] regionserver.HStore(327): Store=a076ac423b6066c428e680b62e31c488/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:24,480 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,481 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,483 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,483 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 2024-12-02T14:17:24,485 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:24,486 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 2024-12-02T14:17:24,486 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a076ac423b6066c428e680b62e31c488 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-02T14:17:24,501 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/.tmp/a/b0f1066864c24d3e9d64397b2c99178b is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733149040382/Put/seqid=0 2024-12-02T14:17:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741884_1062 (size=5149) 2024-12-02T14:17:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741884_1062 (size=5149) 2024-12-02T14:17:24,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741884_1062 (size=5149) 2024-12-02T14:17:24,509 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/.tmp/a/b0f1066864c24d3e9d64397b2c99178b 2024-12-02T14:17:24,515 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/.tmp/a/b0f1066864c24d3e9d64397b2c99178b as hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/b0f1066864c24d3e9d64397b2c99178b 2024-12-02T14:17:24,520 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/a/b0f1066864c24d3e9d64397b2c99178b, entries=1, sequenceid=5, filesize=5.0 K 2024-12-02T14:17:24,521 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for a076ac423b6066c428e680b62e31c488 in 35ms, sequenceid=5, compaction requested=false; wal=null 2024-12-02T14:17:24,521 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/0000000000000000005 2024-12-02T14:17:24,523 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,523 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,523 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:24,525 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for a076ac423b6066c428e680b62e31c488 2024-12-02T14:17:24,527 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/a076ac423b6066c428e680b62e31c488/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-02T14:17:24,528 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened a076ac423b6066c428e680b62e31c488; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60968674, jitterRate=-0.09149596095085144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:24,528 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for a076ac423b6066c428e680b62e31c488: Writing region info on filesystem at 1733149044468Initializing all the Stores at 1733149044469 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149044469Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149044469Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149044469Obtaining lock to block concurrent updates at 1733149044486 (+17 ms)Preparing flush snapshotting stores in a076ac423b6066c428e680b62e31c488 at 1733149044486Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733149044486Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. at 1733149044486Flushing a076ac423b6066c428e680b62e31c488/a: creating writer at 1733149044486Flushing a076ac423b6066c428e680b62e31c488/a: appending metadata at 1733149044500 (+14 ms)Flushing a076ac423b6066c428e680b62e31c488/a: closing flushed file at 1733149044500Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ef48a9c: reopening flushed file at 1733149044514 (+14 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for a076ac423b6066c428e680b62e31c488 in 35ms, sequenceid=5, compaction requested=false; wal=null at 1733149044521 (+7 ms)Cleaning up temporary data from old regions at 1733149044523 (+2 ms)Region opened successfully at 1733149044528 (+5 ms) 2024-12-02T14:17:24,532 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing a076ac423b6066c428e680b62e31c488, disabling compactions & flushes 2024-12-02T14:17:24,532 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:24,532 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:24,532 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. after waiting 0 ms 2024-12-02T14:17:24,532 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 2024-12-02T14:17:24,533 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733149040091.a076ac423b6066c428e680b62e31c488. 
2024-12-02T14:17:24,534 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for a076ac423b6066c428e680b62e31c488: Waiting for close lock at 1733149044532Disabling compacts and flushes for region at 1733149044532Disabling writes for close at 1733149044532Writing region close event to WAL at 1733149044533 (+1 ms)Closed at 1733149044533 2024-12-02T14:17:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741883_1061 (size=95) 2024-12-02T14:17:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741883_1061 (size=95) 2024-12-02T14:17:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741883_1061 (size=95) 2024-12-02T14:17:24,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:24,540 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733149044449) 2024-12-02T14:17:24,556 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=404 (was 400) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42525 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_161961445_22 at /127.0.0.1:55690 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:42525 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:44559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_161961445_22 at /127.0.0.1:50640 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1004 (was 943) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=4522 (was 4554) 2024-12-02T14:17:24,571 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=404, OpenFileDescriptor=1004, MaxFileDescriptor=1048576, SystemLoadAverage=303, ProcessCount=11, AvailableMemoryMB=4520 2024-12-02T14:17:24,594 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:24,599 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:17:24,603 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b4ac66777750,42687,1733149028802 2024-12-02T14:17:24,606 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@64123686 2024-12-02T14:17:24,607 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:17:24,609 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:17:24,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:17:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-02T14:17:24,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:17:24,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-02T14:17:24,624 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:24,626 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:17:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:17:24,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741885_1063 (size=694) 2024-12-02T14:17:24,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741885_1063 (size=694) 2024-12-02T14:17:24,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741885_1063 (size=694) 2024-12-02T14:17:24,642 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016 2024-12-02T14:17:24,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741886_1064 (size=77) 2024-12-02T14:17:24,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741886_1064 (size=77) 2024-12-02T14:17:24,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741886_1064 (size=77) 2024-12-02T14:17:24,653 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:24,653 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:24,653 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
2024-12-02T14:17:24,654 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:24,654 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:24,654 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:24,654 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:24,654 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149044653Disabling compacts and flushes for region at 1733149044653Disabling writes for close at 1733149044654 (+1 ms)Writing region close event to WAL at 1733149044654Closed at 1733149044654 2024-12-02T14:17:24,655 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:17:24,662 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733149044655"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733149044655"}]},"ts":"1733149044655"} 2024-12-02T14:17:24,666 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T14:17:24,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:17:24,670 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733149044667"}]},"ts":"1733149044667"} 2024-12-02T14:17:24,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-02T14:17:24,674 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b4ac66777750=0} racks are {/default-rack=0} 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T14:17:24,676 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T14:17:24,676 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T14:17:24,676 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T14:17:24,676 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T14:17:24,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN}] 2024-12-02T14:17:24,680 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN 2024-12-02T14:17:24,681 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN; state=OFFLINE, location=b4ac66777750,41225,1733149029599; forceNewPlan=false, retain=false 2024-12-02T14:17:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:17:24,834 INFO [b4ac66777750:42687 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-02T14:17:24,835 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPENING, regionLocation=b4ac66777750,41225,1733149029599 2024-12-02T14:17:24,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN because future has completed 2024-12-02T14:17:24,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599}] 2024-12-02T14:17:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:17:24,994 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:17:24,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33899, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:17:25,000 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,001 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:25,001 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,001 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:25,001 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,001 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,003 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,005 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:25,005 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,005 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,005 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,007 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:25,007 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,008 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,008 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,008 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,009 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,009 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,010 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,011 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-02T14:17:25,012 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,014 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:25,015 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0738ea0faaf2c5867685e891599fe105; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72825207, jitterRate=0.08518014848232269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-02T14:17:25,015 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,016 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0738ea0faaf2c5867685e891599fe105: Running coprocessor pre-open hook at 1733149045002Writing region info on filesystem at 1733149045002Initializing all the Stores at 1733149045002Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149045002Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149045003 (+1 ms)Cleaning up temporary data from old regions at 1733149045010 (+7 ms)Running coprocessor post-open hooks at 1733149045015 (+5 ms)Region opened successfully at 1733149045016 (+1 ms) 2024-12-02T14:17:25,017 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., pid=6, masterSystemTime=1733149044993 2024-12-02T14:17:25,020 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,020 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,022 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPEN, openSeqNum=2, regionLocation=b4ac66777750,41225,1733149029599 2024-12-02T14:17:25,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 because future has completed 2024-12-02T14:17:25,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:17:25,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 in 186 msec 2024-12-02T14:17:25,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:17:25,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN in 351 msec 2024-12-02T14:17:25,033 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:17:25,034 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733149045034"}]},"ts":"1733149045034"} 2024-12-02T14:17:25,036 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-02T14:17:25,037 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:17:25,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 423 msec 2024-12-02T14:17:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:17:25,260 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-02T14:17:25,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-12-02T14:17:25,261 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:17:25,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-02T14:17:25,267 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:17:25,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-02T14:17:25,280 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=2] 2024-12-02T14:17:25,282 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:17:25,283 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:17:25,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=0738ea0faaf2c5867685e891599fe105, source=b4ac66777750,41225,1733149029599, destination=b4ac66777750,40955,1733149029496, warming up region on b4ac66777750,40955,1733149029496 2024-12-02T14:17:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:17:25,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=0738ea0faaf2c5867685e891599fe105, source=b4ac66777750,41225,1733149029599, destination=b4ac66777750,40955,1733149029496, running balancer 2024-12-02T14:17:25,303 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:17:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE 2024-12-02T14:17:25,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE 2024-12-02T14:17:25,306 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=CLOSING, regionLocation=b4ac66777750,41225,1733149029599 2024-12-02T14:17:25,308 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(7855): Warmup {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:25,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE because future has completed 2024-12-02T14:17:25,313 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T14:17:25,313 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599}] 2024-12-02T14:17:25,314 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:25,314 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,315 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,315 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,316 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:25,316 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,317 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:25,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
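A minimal sketch, assuming an already-open Connection `conn` to this mini-cluster, of the row-location lookup that the AsyncNonMetaRegionLocator entry above records for row 'r1'. The wrapper class and method names below are illustrative only and are not code taken from this test.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRowSketch {
  // Returns the server currently hosting row 'r1'; with reload=true the client
  // location cache is bypassed, so the answer tracks reassignments such as the
  // REOPEN/MOVE procedure running in the surrounding log.
  static HRegionLocation locateR1(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      return locator.getRegionLocation(Bytes.toBytes("r1"), true);
    }
  }
}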
2024-12-02T14:17:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149045317Disabling compacts and flushes for region at 1733149045317Disabling writes for close at 1733149045317Writing region close event to WAL at 1733149045320 (+3 ms)Closed at 1733149045320 2024-12-02T14:17:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-02T14:17:25,471 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,471 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T14:17:25,472 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:25,472 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,472 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,472 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:25,472 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
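The "move hri=..., running balancer" entries above come from a client-side move request. Below is a hedged sketch of how such a request can be issued through the public Admin API, assuming a Connection `conn` and a destination ServerName picked from the running cluster; the wrapper class and its names are mine, not the test's.

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MoveRegionSketch {
  // Ask the master to move the (single) region of the test table to `dest`;
  // on the master side this appears as a TransitRegionStateProcedure REOPEN/MOVE.
  static void moveOnlyRegion(Connection conn, TableName table, ServerName dest) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(table);
      admin.move(regions.get(0).getEncodedNameAsBytes(), dest);
    }
  }
}

Note the ordering visible in the log: the master first warms the region up on the destination server (the RSRpcServices "Warmup" entries on port 40955) before closing it on the source server.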
2024-12-02T14:17:25,472 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 0738ea0faaf2c5867685e891599fe105 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-02T14:17:25,490 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b is 35, key is r1/cf1:q/1733149045284/Put/seqid=0 2024-12-02T14:17:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741887_1065 (size=4783) 2024-12-02T14:17:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741887_1065 (size=4783) 2024-12-02T14:17:25,497 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b 2024-12-02T14:17:25,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741887_1065 (size=4783) 2024-12-02T14:17:25,504 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b 2024-12-02T14:17:25,509 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b, entries=1, sequenceid=5, filesize=4.7 K 2024-12-02T14:17:25,510 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 0738ea0faaf2c5867685e891599fe105 in 38ms, sequenceid=5, compaction requested=false 2024-12-02T14:17:25,510 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-02T14:17:25,515 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-02T14:17:25,517 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,517 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149045472Running coprocessor pre-close hooks at 1733149045472Disabling compacts and flushes for region at 1733149045472Disabling writes for close at 1733149045472Obtaining lock to block concurrent updates at 1733149045472Preparing flush snapshotting stores in 0738ea0faaf2c5867685e891599fe105 at 1733149045472Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733149045473 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. at 1733149045473Flushing 0738ea0faaf2c5867685e891599fe105/cf1: creating writer at 1733149045473Flushing 0738ea0faaf2c5867685e891599fe105/cf1: appending metadata at 1733149045489 (+16 ms)Flushing 0738ea0faaf2c5867685e891599fe105/cf1: closing flushed file at 1733149045489Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2287d135: reopening flushed file at 1733149045503 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 0738ea0faaf2c5867685e891599fe105 in 38ms, sequenceid=5, compaction requested=false at 1733149045510 (+7 ms)Writing region close event to WAL at 1733149045512 (+2 ms)Running coprocessor post-close hooks at 1733149045515 (+3 ms)Closed at 1733149045517 (+2 ms) 2024-12-02T14:17:25,518 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 0738ea0faaf2c5867685e891599fe105 move to b4ac66777750,40955,1733149029496 record at close sequenceid=5 2024-12-02T14:17:25,521 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,521 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=CLOSED 2024-12-02T14:17:25,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 because future has completed 2024-12-02T14:17:25,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T14:17:25,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 in 212 msec 2024-12-02T14:17:25,530 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE; state=CLOSED, 
location=b4ac66777750,40955,1733149029496; forceNewPlan=false, retain=false 2024-12-02T14:17:25,680 INFO [b4ac66777750:42687 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T14:17:25,680 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPENING, regionLocation=b4ac66777750,40955,1733149029496 2024-12-02T14:17:25,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE because future has completed 2024-12-02T14:17:25,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496}] 2024-12-02T14:17:25,842 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,843 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:25,843 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,843 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:25,843 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,843 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,845 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,846 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:25,846 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,853 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b 2024-12-02T14:17:25,853 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,853 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,854 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:25,854 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:25,855 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:25,855 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,856 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,857 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,858 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,858 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,859 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-02T14:17:25,860 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,861 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 0738ea0faaf2c5867685e891599fe105; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67001966, jitterRate=-0.0015929043292999268}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-02T14:17:25,862 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:25,862 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 0738ea0faaf2c5867685e891599fe105: Running coprocessor pre-open hook at 1733149045843Writing region info on filesystem at 1733149045843Initializing all the Stores at 1733149045845 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149045845Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149045845Cleaning up temporary data from old regions at 1733149045858 (+13 ms)Running coprocessor post-open hooks at 1733149045862 (+4 ms)Region opened successfully at 1733149045862 2024-12-02T14:17:25,863 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., pid=9, masterSystemTime=1733149045836 2024-12-02T14:17:25,866 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,866 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:25,867 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPEN, openSeqNum=9, regionLocation=b4ac66777750,40955,1733149029496 2024-12-02T14:17:25,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 because future has completed 2024-12-02T14:17:25,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-02T14:17:25,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 in 187 msec 2024-12-02T14:17:25,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE in 572 msec 2024-12-02T14:17:25,910 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:17:25,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35862, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:17:25,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.3:39038 deadline: 1733149105916, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=40955 startCode=1733149029496. As of locationSeqNum=5. 2024-12-02T14:17:25,924 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=40955 startCode=1733149029496. As of locationSeqNum=5. 2024-12-02T14:17:25,925 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=40955 startCode=1733149029496. As of locationSeqNum=5. 
2024-12-02T14:17:25,925 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=40955 startCode=1733149029496. As of locationSeqNum=5. 2024-12-02T14:17:26,039 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:17:26,041 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:17:26,053 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0738ea0faaf2c5867685e891599fe105 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-02T14:17:26,077 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/9b46d6d3846d49f7969e5b6b1634f765 is 29, key is r1/cf1:/1733149046042/DeleteFamily/seqid=0 2024-12-02T14:17:26,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741888_1066 (size=4906) 2024-12-02T14:17:26,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741888_1066 (size=4906) 2024-12-02T14:17:26,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741888_1066 (size=4906) 2024-12-02T14:17:26,086 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,092 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,107 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf2/a884281702cc44859d3462b45f52a51b is 29, key is r1/cf2:/1733149046042/DeleteFamily/seqid=0 2024-12-02T14:17:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741889_1067 (size=4906) 2024-12-02T14:17:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741889_1067 (size=4906) 2024-12-02T14:17:26,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741889_1067 (size=4906) 
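The RegionMovedException and AsyncRegionLocatorHelper entries above show the client recovering from a stale location cache after the move. A sketch, assuming Connection `conn`, of the kind of write that exercises that path; user code does not catch the exception itself, the client refreshes its cached location (to seqNum=5 here) and retries.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAfterMoveSketch {
  static void writeR1(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("r1"));
      // column family cf1, qualifier q, matching the r1/cf1:q cell flushed earlier
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put); // a stale location triggers RegionMovedException internally; the client refreshes and retries
    }
  }
}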
2024-12-02T14:17:26,114 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf2/a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,120 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,121 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/9b46d6d3846d49f7969e5b6b1634f765 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,126 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,127 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765, entries=1, sequenceid=12, filesize=4.8 K 2024-12-02T14:17:26,127 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf2/a884281702cc44859d3462b45f52a51b as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,134 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,134 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b, entries=1, sequenceid=12, filesize=4.8 K 2024-12-02T14:17:26,136 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 0738ea0faaf2c5867685e891599fe105 in 83ms, sequenceid=12, compaction requested=false 2024-12-02T14:17:26,136 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0738ea0faaf2c5867685e891599fe105: 2024-12-02T14:17:26,139 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T14:17:26,141 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0738ea0faaf2c5867685e891599fe105/cf1 is initiating major compaction (all files) 2024-12-02T14:17:26,141 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:17:26,141 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:26,142 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0738ea0faaf2c5867685e891599fe105/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,142 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b, hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765] into tmpdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp, totalSize=9.5 K 2024-12-02T14:17:26,143 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e3e3e76db9ca4317b23c23cf2a99ab9b, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733149045284 2024-12-02T14:17:26,144 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9b46d6d3846d49f7969e5b6b1634f765, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-02T14:17:26,156 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0738ea0faaf2c5867685e891599fe105#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:17:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741890_1068 (size=4626) 2024-12-02T14:17:26,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741890_1068 (size=4626) 2024-12-02T14:17:26,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741890_1068 (size=4626) 2024-12-02T14:17:26,173 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf1/8a5b335b996246f68bd429a355ac2e81 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/8a5b335b996246f68bd429a355ac2e81 2024-12-02T14:17:26,191 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 0738ea0faaf2c5867685e891599fe105/cf1 of 0738ea0faaf2c5867685e891599fe105 into 8a5b335b996246f68bd429a355ac2e81(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
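The major compaction of cf1 just completed above (cf2 follows) is driven directly by the test thread. A sketch, assuming Connection `conn`, of requesting an equivalent flush-then-major-compaction through the public Admin API; Admin#majorCompact is asynchronous, so completion only shows up in region-server log entries like these.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

public class MajorCompactSketch {
  static void flushAndMajorCompactCf1(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Admin admin = conn.getAdmin()) {
      admin.flush(tn);                              // persist memstore edits to store files
      admin.majorCompact(tn, Bytes.toBytes("cf1")); // rewrite all cf1 store files into one
    }
  }
}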
2024-12-02T14:17:26,191 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0738ea0faaf2c5867685e891599fe105: 2024-12-02T14:17:26,191 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-02T14:17:26,191 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0738ea0faaf2c5867685e891599fe105/cf2 is initiating major compaction (all files) 2024-12-02T14:17:26,191 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:17:26,191 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:17:26,192 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0738ea0faaf2c5867685e891599fe105/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,192 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b] into tmpdir=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp, totalSize=4.8 K 2024-12-02T14:17:26,192 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a884281702cc44859d3462b45f52a51b, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-02T14:17:26,199 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0738ea0faaf2c5867685e891599fe105#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:17:26,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741891_1069 (size=4592) 2024-12-02T14:17:26,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741891_1069 (size=4592) 2024-12-02T14:17:26,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741891_1069 (size=4592) 2024-12-02T14:17:26,220 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/.tmp/cf2/4171494aafe34a7f932a78c2aa740180 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/4171494aafe34a7f932a78c2aa740180 2024-12-02T14:17:26,227 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 0738ea0faaf2c5867685e891599fe105/cf2 of 0738ea0faaf2c5867685e891599fe105 into 4171494aafe34a7f932a78c2aa740180(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:17:26,227 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0738ea0faaf2c5867685e891599fe105: 2024-12-02T14:17:26,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=0738ea0faaf2c5867685e891599fe105, source=b4ac66777750,40955,1733149029496, destination=b4ac66777750,41225,1733149029599, warming up region on b4ac66777750,41225,1733149029599 2024-12-02T14:17:26,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=0738ea0faaf2c5867685e891599fe105, source=b4ac66777750,40955,1733149029496, destination=b4ac66777750,41225,1733149029599, running balancer 2024-12-02T14:17:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE 2024-12-02T14:17:26,234 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE 2024-12-02T14:17:26,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(7855): Warmup {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:26,235 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=CLOSING, regionLocation=b4ac66777750,40955,1733149029496 2024-12-02T14:17:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:26,237 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,238 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:26,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE because future has completed 2024-12-02T14:17:26,238 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:26,239 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T14:17:26,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496}] 2024-12-02T14:17:26,246 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/8a5b335b996246f68bd429a355ac2e81 2024-12-02T14:17:26,251 INFO [StoreFileOpener-0738ea0faaf2c5867685e891599fe105-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,251 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,256 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b 2024-12-02T14:17:26,256 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:26,257 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,258 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:26,258 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:26,264 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/4171494aafe34a7f932a78c2aa740180 2024-12-02T14:17:26,268 INFO [StoreFileOpener-0738ea0faaf2c5867685e891599fe105-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,268 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,268 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:26,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
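[editor's note] The move of region 0738ea0faaf2c5867685e891599fe105 from b4ac66777750,40955,1733149029496 to b4ac66777750,41225,1733149029599 recorded above (master stores TransitRegionStateProcedure pid=10 REOPEN/MOVE and warms the region up on the destination) is the kind of move a client requests through the HBase Admin API. The test's own code is not part of this log, so the Java sketch below is only an assumed illustration of how such a move is normally issued; the table name and destination ServerName are copied from the log, everything else (class name, Configuration source) is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MoveRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
          // The table has a single region in this test; its encoded name is 0738ea0faaf2c5867685e891599fe105.
          RegionInfo region = admin.getRegions(table).get(0);
          // Destination server exactly as printed in the log: host,port,startcode.
          ServerName dest = ServerName.valueOf("b4ac66777750,41225,1733149029599");
          // Asks the master to unassign on the source and reassign on dest, which is
          // what drives the REOPEN/MOVE procedure (pid=10) seen above.
          admin.move(region.getEncodedNameAsBytes(), dest);
        }
      }
    }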
2024-12-02T14:17:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41225 {}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149046268Disabling compacts and flushes for region at 1733149046268Disabling writes for close at 1733149046268Writing region close event to WAL at 1733149046269 (+1 ms)Closed at 1733149046269 2024-12-02T14:17:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-02T14:17:26,393 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,394 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T14:17:26,394 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:26,394 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,394 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,394 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:26,394 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,394 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b, hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765] to archive 2024-12-02T14:17:26,398 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T14:17:26,402 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b 2024-12-02T14:17:26,405 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765 to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/9b46d6d3846d49f7969e5b6b1634f765 2024-12-02T14:17:26,422 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b] to archive 2024-12-02T14:17:26,423 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T14:17:26,427 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/a884281702cc44859d3462b45f52a51b 2024-12-02T14:17:26,444 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-02T14:17:26,446 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
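[editor's note] The HFileArchiver lines above move the compacted cf1/cf2 store files from the region's data directory to the parallel path under archive/, and WALSplitUtil then writes recovered.edits/17.seqid to record the max sequence id at close. A plain Hadoop FileSystem check is enough to confirm that layout; the sketch below reuses one cf1 path exactly as printed in the log and is an illustration, not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CheckArchivedStoreFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String base = "hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016";
        String rel = "/data/default/testReplayEditsAfterRegionMovedWithMultiCF/"
            + "0738ea0faaf2c5867685e891599fe105/cf1/e3e3e76db9ca4317b23c23cf2a99ab9b";
        Path original = new Path(base + rel);              // pre-archive location
        Path archived = new Path(base + "/archive" + rel); // post-archive location, as logged
        FileSystem fs = original.getFileSystem(conf);
        // After HFileArchiver runs, the file should only exist under archive/.
        System.out.println("in data dir:    " + fs.exists(original));
        System.out.println("in archive dir: " + fs.exists(archived));
      }
    }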
2024-12-02T14:17:26,446 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149046394Running coprocessor pre-close hooks at 1733149046394Disabling compacts and flushes for region at 1733149046394Disabling writes for close at 1733149046394Writing region close event to WAL at 1733149046429 (+35 ms)Running coprocessor post-close hooks at 1733149046446 (+17 ms)Closed at 1733149046446 2024-12-02T14:17:26,446 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 0738ea0faaf2c5867685e891599fe105 move to b4ac66777750,41225,1733149029599 record at close sequenceid=12 2024-12-02T14:17:26,449 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,453 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=CLOSED 2024-12-02T14:17:26,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 because future has completed 2024-12-02T14:17:26,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T14:17:26,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 in 219 msec 2024-12-02T14:17:26,463 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE; state=CLOSED, location=b4ac66777750,41225,1733149029599; forceNewPlan=false, retain=false 2024-12-02T14:17:26,614 INFO [b4ac66777750:42687 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T14:17:26,615 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPENING, regionLocation=b4ac66777750,41225,1733149029599 2024-12-02T14:17:26,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE because future has completed 2024-12-02T14:17:26,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599}] 2024-12-02T14:17:26,780 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
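[editor's note] With the source-side close finished (pid=11), the master records CLOSED, re-runs assignment, marks the region OPENING, and dispatches OpenRegionProcedure pid=12 to b4ac66777750,41225,1733149029599. A test that needs to block until the move has landed usually polls the destination server's online regions; the helper below is a hedged sketch of that pattern (class, method name, and timeout are made up), relying only on Admin.getRegions(ServerName) and RegionInfo.getEncodedName().

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MoveWaiter {
      // Polls until the destination server reports the region as online, or the deadline passes.
      public static boolean waitForRegionOnServer(Admin admin, ServerName dest,
          String encodedRegionName, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          for (RegionInfo ri : admin.getRegions(dest)) {
            if (ri.getEncodedName().equals(encodedRegionName)) {
              return true;
            }
          }
          Thread.sleep(100);
        }
        return false;
      }
    }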
2024-12-02T14:17:26,780 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:26,781 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,781 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:26,781 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,781 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,783 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,784 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:26,784 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:26,793 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/8a5b335b996246f68bd429a355ac2e81 2024-12-02T14:17:26,793 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:26,794 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,795 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:26,795 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:26,802 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/4171494aafe34a7f932a78c2aa740180 2024-12-02T14:17:26,802 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:26,802 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,803 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,804 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,805 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,805 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,806 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
2024-12-02T14:17:26,807 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,808 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0738ea0faaf2c5867685e891599fe105; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72587985, jitterRate=0.08164526522159576}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-02T14:17:26,808 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,809 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0738ea0faaf2c5867685e891599fe105: Running coprocessor pre-open hook at 1733149046781Writing region info on filesystem at 1733149046781Initializing all the Stores at 1733149046782 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149046782Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149046782Cleaning up temporary data from old regions at 1733149046805 (+23 ms)Running coprocessor post-open hooks at 1733149046808 (+3 ms)Region opened successfully at 1733149046809 (+1 ms) 2024-12-02T14:17:26,810 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., pid=12, masterSystemTime=1733149046771 2024-12-02T14:17:26,813 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,813 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
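[editor's note] After the region opens on the destination with next sequenceid=18, hbase:meta is updated to OPEN with openSeqNum=18 (next record). A client can observe the post-move location and that sequence number through RegionLocator; in the sketch below the row key is invented, and reload=true forces a fresh meta lookup instead of returning the stale cached location.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateAfterMove {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(table)) {
          // reload=true bypasses the client-side cache and re-reads hbase:meta.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-0"), true);
          // Expected after the move: server b4ac66777750,41225,... and seqNum 18 (the openSeqNum above).
          System.out.println(loc.getServerName() + " seqNum=" + loc.getSeqNum());
        }
      }
    }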
2024-12-02T14:17:26,814 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPEN, openSeqNum=18, regionLocation=b4ac66777750,41225,1733149029599 2024-12-02T14:17:26,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 because future has completed 2024-12-02T14:17:26,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-02T14:17:26,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,41225,1733149029599 in 203 msec 2024-12-02T14:17:26,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, REOPEN/MOVE in 592 msec 2024-12-02T14:17:26,836 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:17:26,838 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52206, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:17:26,840 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server b4ac66777750,41225,1733149029599: testing ***** 2024-12-02T14:17:26,840 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-02T14:17:26,842 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-02T14:17:26,844 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-02T14:17:26,848 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-02T14:17:26,850 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-02T14:17:26,859 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "Verbose": false, "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 470245024 }, "NonHeapMemoryUsage": { "committed": 172032000, "init": 7667712, "max": -1, "used": 169385528 }, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "b4ac66777750", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, 
"numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2074, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 1, "ProcessCallTime_max": 11, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 3, "ProcessCallTime_median": 6, "ProcessCallTime_75th_percentile": 8, "ProcessCallTime_90th_percentile": 10, "ProcessCallTime_95th_percentile": 10, "ProcessCallTime_98th_percentile": 10, "ProcessCallTime_99th_percentile": 10, "ProcessCallTime_99.9th_percentile": 10, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 0, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 11, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 6, "TotalCallTime_75th_percentile": 8, "TotalCallTime_90th_percentile": 10, "TotalCallTime_95th_percentile": 10, "TotalCallTime_98th_percentile": 10, "TotalCallTime_99th_percentile": 10, "TotalCallTime_99.9th_percentile": 10, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, "ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, 
"RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "b4ac66777750", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:56104", "tag.serverName": "b4ac66777750,43009,1733149029645", "tag.clusterId": "2a698aee-e82c-4743-9019-188c37b7d070", "tag.Context": "regionserver", "tag.Hostname": "b4ac66777750", "regionCount": 1, "storeCount": 4, "hlogFileCount": 2, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 74, "memStoreHeapSize": 1248, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733149029645, "averageRegionSize": 74, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, 
"l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 1.4, "writeRequestRatePerSecond": 0.4, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 199680, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 1, "activeScanners": 0, "totalRequestCount": 7, "totalRowActionRequestCount": 9, "readRequestCount": 7, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 2, "rpcGetRequestCount": 1, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 4, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 2, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 4, "ScanTime_min": 0, "ScanTime_max": 2, "ScanTime_mean": 0, 
"ScanTime_25th_percentile": 0, "ScanTime_median": 1, "ScanTime_75th_percentile": 1, "ScanTime_90th_percentile": 1, "ScanTime_95th_percentile": 1, "ScanTime_98th_percentile": 1, "ScanTime_99th_percentile": 1, "ScanTime_99.9th_percentile": 1, "ScanTime_TimeRangeCount_0-1": 4, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 2, "Put_min": 1, "Put_max": 8, "Put_mean": 4, "Put_25th_percentile": 2, "Put_median": 4, "Put_75th_percentile": 6, "Put_90th_percentile": 7, "Put_95th_percentile": 7, "Put_98th_percentile": 7, "Put_99th_percentile": 7, "Put_99.9th_percentile": 7, "Put_TimeRangeCount_0-1": 2, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, 
"Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, 
"Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 1, "Get_min": 1, "Get_max": 1, "Get_mean": 1, "Get_25th_percentile": 1, "Get_median": 1, "Get_75th_percentile": 1, "Get_90th_percentile": 1, "Get_95th_percentile": 1, "Get_98th_percentile": 1, "Get_99th_percentile": 1, "Get_99.9th_percentile": 1, "Get_TimeRangeCount_0-1": 1, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, 
"MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 4, "ScanSize_min": 0, "ScanSize_max": 144, "ScanSize_mean": 72, "ScanSize_25th_percentile": 36, "ScanSize_median": 72, "ScanSize_75th_percentile": 108, "ScanSize_90th_percentile": 129, "ScanSize_95th_percentile": 136, "ScanSize_98th_percentile": 141, "ScanSize_99th_percentile": 142, "ScanSize_99.9th_percentile": 143, "ScanSize_SizeRangeCount_0-10": 4, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, 
"Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-02T14:17:26,864 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42687 {}] master.MasterRpcServices(700): b4ac66777750,41225,1733149029599 reported a fatal error: ***** ABORTING region server b4ac66777750,41225,1733149029599: testing ***** 2024-12-02T14:17:26,866 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b4ac66777750,41225,1733149029599' ***** 2024-12-02T14:17:26,866 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-02T14:17:26,867 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:17:26,867 INFO [RS:1;b4ac66777750:41225 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-02T14:17:26,867 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:17:26,867 INFO [RS:1;b4ac66777750:41225 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-02T14:17:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40955 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.3:35876 deadline: 1733149106867, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=41225 startCode=1733149029599. As of locationSeqNum=12. 2024-12-02T14:17:26,867 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(3091): Received CLOSE for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,868 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(956): aborting server b4ac66777750,41225,1733149029599 2024-12-02T14:17:26,868 INFO [RS:1;b4ac66777750:41225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:17:26,868 INFO [RS:1;b4ac66777750:41225 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b4ac66777750:41225. 2024-12-02T14:17:26,868 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes 2024-12-02T14:17:26,868 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
2024-12-02T14:17:26,868 DEBUG [RS:1;b4ac66777750:41225 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:17:26,868 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,868 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:26,868 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,869 DEBUG [RS:1;b4ac66777750:41225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:17:26,869 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=41225 startCode=1733149029599. As of locationSeqNum=12. 2024-12-02T14:17:26,869 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=41225 startCode=1733149029599. As of locationSeqNum=12. 
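[editor's note] The AsyncRegionLocatorHelper lines above show the client reacting to RegionMovedException from the old server (cached entry at seqNum=5) by swapping in the new location (b4ac66777750,41225, seqNum=12). Application code does not handle this exception itself: a plain Get is retried against the refreshed location inside the client. The sketch below is an assumed illustration of that call path; the column family cf1 comes from the test's schema, while the class name and row key are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetAfterMove {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table t = conn.getTable(table)) {
          // If the cached location still points at the old server, the RPC fails with
          // RegionMovedException, the locator updates its cache (as logged above), and the
          // client retries transparently before this call returns.
          Result r = t.get(new Get(Bytes.toBytes("row-0")).addFamily(Bytes.toBytes("cf1")));
          System.out.println("result empty? " + r.isEmpty());
        }
      }
    }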
2024-12-02T14:17:26,869 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b4ac66777750 port=41225 startCode=1733149029599. As of locationSeqNum=12. 2024-12-02T14:17:26,869 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T14:17:26,869 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1325): Online Regions={0738ea0faaf2c5867685e891599fe105=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.} 2024-12-02T14:17:26,869 DEBUG [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1351): Waiting on 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:26,873 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,873 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149046868Running coprocessor pre-close hooks at 1733149046868Disabling compacts and flushes for region at 1733149046868Disabling writes for close at 1733149046868Writing region close event to WAL at 1733149046873 (+5 ms)Running coprocessor post-close hooks at 1733149046873Closed at 1733149046873 2024-12-02T14:17:26,873 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:26,948 INFO [regionserver/b4ac66777750:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T14:17:26,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server b4ac66777750,41225,1733149029599 aborting
at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T14:17:26,980 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server b4ac66777750,41225,1733149029599 aborting 2024-12-02T14:17:26,980 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server b4ac66777750,41225,1733149029599 aborting 2024-12-02T14:17:26,980 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=12 from cache 2024-12-02T14:17:27,070 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(976): stopping server b4ac66777750,41225,1733149029599; all regions closed. 2024-12-02T14:17:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741834_1010 (size=1404) 2024-12-02T14:17:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741834_1010 (size=1404) 2024-12-02T14:17:27,074 DEBUG [RS:1;b4ac66777750:41225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:17:27,074 INFO [RS:1;b4ac66777750:41225 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:17:27,074 INFO [RS:1;b4ac66777750:41225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:17:27,075 INFO [RS:1;b4ac66777750:41225 {}] hbase.ChoreService(370): Chore service for: regionserver/b4ac66777750:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:17:27,075 INFO [regionserver/b4ac66777750:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:17:27,075 INFO [RS:1;b4ac66777750:41225 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:17:27,075 INFO [RS:1;b4ac66777750:41225 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:17:27,075 INFO [RS:1;b4ac66777750:41225 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T14:17:27,075 INFO [RS:1;b4ac66777750:41225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:17:27,076 INFO [RS:1;b4ac66777750:41225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41225 2024-12-02T14:17:27,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b4ac66777750,41225,1733149029599 2024-12-02T14:17:27,084 INFO [RS:1;b4ac66777750:41225 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:17:27,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:17:27,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b4ac66777750,41225,1733149029599] 2024-12-02T14:17:27,089 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b4ac66777750,41225,1733149029599 already deleted, retry=false 2024-12-02T14:17:27,089 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of b4ac66777750,41225,1733149029599 on b4ac66777750,42687,1733149028802 2024-12-02T14:17:27,094 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure b4ac66777750,41225,1733149029599, splitWal=true, meta=false 2024-12-02T14:17:27,096 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for b4ac66777750,41225,1733149029599 (carryingMeta=false) b4ac66777750,41225,1733149029599/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@69334bb6[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-12-02T14:17:27,097 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure b4ac66777750,41225,1733149029599, splitWal=true, meta=false 2024-12-02T14:17:27,099 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(207): b4ac66777750,41225,1733149029599 had 1 regions 2024-12-02T14:17:27,101 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure b4ac66777750,41225,1733149029599, splitWal=true, meta=false, isMeta: false 2024-12-02T14:17:27,102 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting 2024-12-02T14:17:27,104 INFO [PEWorker-1 {}] master.SplitWALManager(105): b4ac66777750,41225,1733149029599 WAL count=1, meta=false 2024-12-02T14:17:27,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure b4ac66777750%2C41225%2C1733149029599.1733149031392}] 2024-12-02T14:17:27,112 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=b4ac66777750,43009,1733149029645 2024-12-02T14:17:27,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure b4ac66777750%2C41225%2C1733149029599.1733149031392, worker=b4ac66777750,43009,1733149029645}] 2024-12-02T14:17:27,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:27,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41225-0x1009b59793c0002, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:27,189 INFO [RS:1;b4ac66777750:41225 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:17:27,189 INFO [RS:1;b4ac66777750:41225 {}] regionserver.HRegionServer(1031): Exiting; stopping=b4ac66777750,41225,1733149029599; zookeeper connection closed. 2024-12-02T14:17:27,190 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@617dfc0d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@617dfc0d 2024-12-02T14:17:27,192 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18] 2024-12-02T14:17:27,194 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server b4ac66777750:41225 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: b4ac66777750/172.17.0.3:41225 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T14:17:27,195 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18, error=java.net.ConnectException: Call to address=b4ac66777750:41225 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: b4ac66777750/172.17.0.3:41225 2024-12-02T14:17:27,195 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 is java.net.ConnectException: Connection refused 2024-12-02T14:17:27,195 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 from cache 2024-12-02T14:17:27,195 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address b4ac66777750:41225 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: b4ac66777750/172.17.0.3:41225 2024-12-02T14:17:27,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43009 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-02T14:17:27,305 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392, size=1.4 K (1404bytes)
2024-12-02T14:17:27,305 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 2024-12-02T14:17:27,306 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 after 0ms 2024-12-02T14:17:27,308 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:27,309 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 took 5ms 2024-12-02T14:17:27,316 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 0738ea0faaf2c5867685e891599fe105: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-02T14:17:27,316 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 so closing down 2024-12-02T14:17:27,316 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:27,316 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:27,317 INFO [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 8 ms; skipped=6; WAL=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392, size=1.4 K, length=1404, corrupted=false, cancelled=false 2024-12-02T14:17:27,317 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392, journal: Splitting hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392, 
size=1.4 K (1404bytes) at 1733149047305Finishing writing output for hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 so closing down at 1733149047316 (+11 ms)3 split writer threads finished at 1733149047317 (+1 ms)Processed 6 edits across 0 Regions in 8 ms; skipped=6; WAL=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392, size=1.4 K, length=1404, corrupted=false, cancelled=false at 1733149047317 2024-12-02T14:17:27,317 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 2024-12-02T14:17:27,318 DEBUG [RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-02T14:17:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42687 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-02T14:17:27,325 INFO [PEWorker-3 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting/b4ac66777750%2C41225%2C1733149029599.1733149031392 to hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs 2024-12-02T14:17:27,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-02T14:17:27,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure b4ac66777750%2C41225%2C1733149029599.1733149031392, worker=b4ac66777750,43009,1733149029645 in 211 msec 2024-12-02T14:17:27,329 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=b4ac66777750,43009,1733149029645 2024-12-02T14:17:27,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-02T14:17:27,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure b4ac66777750%2C41225%2C1733149029599.1733149031392, worker=b4ac66777750,43009,1733149029645 in 225 msec 2024-12-02T14:17:27,337 INFO [PEWorker-1 {}] master.SplitLogManager(171): hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting dir is empty, no logs to split. 2024-12-02T14:17:27,337 INFO [PEWorker-1 {}] master.SplitWALManager(105): b4ac66777750,41225,1733149029599 WAL count=0, meta=false 2024-12-02T14:17:27,337 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if b4ac66777750,41225,1733149029599 WAL splitting is done? wals=0, meta=false 2024-12-02T14:17:27,339 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for b4ac66777750,41225,1733149029599 failed, ignore...File hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/WALs/b4ac66777750,41225,1733149029599-splitting does not exist. 
2024-12-02T14:17:27,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN}] 2024-12-02T14:17:27,344 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN 2024-12-02T14:17:27,345 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(204): Hosts are {b4ac66777750=0} racks are {/default-rack=0} 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T14:17:27,496 INFO [b4ac66777750:42687 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T14:17:27,496 INFO [b4ac66777750:42687 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T14:17:27,496 DEBUG [b4ac66777750:42687 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T14:17:27,497 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPENING, regionLocation=b4ac66777750,40955,1733149029496 2024-12-02T14:17:27,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN because future has completed 2024-12-02T14:17:27,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496}] 2024-12-02T14:17:27,501 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18] 2024-12-02T14:17:27,502 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to b4ac66777750:41225 this server is in the failed servers list 2024-12-02T14:17:27,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=b4ac66777750:41225 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: b4ac66777750:41225 2024-12-02T14:17:27,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: b4ac66777750:41225 2024-12-02T14:17:27,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,41225,1733149029599, seqNum=18 from cache 2024-12-02T14:17:27,658 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:27,659 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 0738ea0faaf2c5867685e891599fe105, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:27,659 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,659 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:27,659 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,659 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,661 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,662 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf1 2024-12-02T14:17:27,662 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:27,669 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf1/8a5b335b996246f68bd429a355ac2e81 2024-12-02T14:17:27,669 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:27,669 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,670 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0738ea0faaf2c5867685e891599fe105 columnFamilyName cf2 2024-12-02T14:17:27,670 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:27,678 DEBUG [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/cf2/4171494aafe34a7f932a78c2aa740180 2024-12-02T14:17:27,678 INFO [StoreOpener-0738ea0faaf2c5867685e891599fe105-1 {}] regionserver.HStore(327): Store=0738ea0faaf2c5867685e891599fe105/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:27,678 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 
0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,679 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,681 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,681 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,681 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,682 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-02T14:17:27,684 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,685 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 0738ea0faaf2c5867685e891599fe105; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74838844, jitterRate=0.1151856780052185}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-02T14:17:27,685 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0738ea0faaf2c5867685e891599fe105 2024-12-02T14:17:27,686 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 0738ea0faaf2c5867685e891599fe105: Running coprocessor pre-open hook at 1733149047659Writing region info on filesystem at 1733149047659Initializing all the Stores at 1733149047660 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149047660Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149047660Cleaning up temporary data from old regions at 1733149047681 (+21 ms)Running coprocessor post-open hooks at 1733149047685 
(+4 ms)Region opened successfully at 1733149047686 (+1 ms) 2024-12-02T14:17:27,687 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., pid=17, masterSystemTime=1733149047654 2024-12-02T14:17:27,689 DEBUG [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:27,689 INFO [RS_OPEN_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:27,690 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=0738ea0faaf2c5867685e891599fe105, regionState=OPEN, openSeqNum=18, regionLocation=b4ac66777750,40955,1733149029496 2024-12-02T14:17:27,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 because future has completed 2024-12-02T14:17:27,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-02T14:17:27,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 0738ea0faaf2c5867685e891599fe105, server=b4ac66777750,40955,1733149029496 in 194 msec 2024-12-02T14:17:27,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-02T14:17:27,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=0738ea0faaf2c5867685e891599fe105, ASSIGN in 356 msec 2024-12-02T14:17:27,699 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(291): removed crashed server b4ac66777750,41225,1733149029599 after splitting done 2024-12-02T14:17:27,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure b4ac66777750,41225,1733149029599, splitWal=true, meta=false in 609 msec 2024-12-02T14:17:28,021 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105., hostname=b4ac66777750,40955,1733149029496, seqNum=18] 2024-12-02T14:17:28,037 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=405 (was 404) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_LOG_REPLAY_OPS-regionserver/b4ac66777750:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1949744955_22 at /127.0.0.1:55690 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1699723724_22 at /127.0.0.1:46492 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b4ac66777750:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1699723724_22 at /127.0.0.1:50640 [Waiting for operation #28] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1036 (was 1004) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 303), ProcessCount=11 (was 11), AvailableMemoryMB=4389 (was 4520) 2024-12-02T14:17:28,039 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1036 is superior to 1024 2024-12-02T14:17:28,051 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=405, OpenFileDescriptor=1036, MaxFileDescriptor=1048576, SystemLoadAverage=303, ProcessCount=11, AvailableMemoryMB=4388 2024-12-02T14:17:28,051 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1036 is superior to 1024 2024-12-02T14:17:28,067 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:28,068 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:28,069 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:28,071 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-61416214, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-61416214, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:28,084 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-61416214/hregion-61416214.1733149048071, exclude list is [], retry=0 2024-12-02T14:17:28,087 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:28,087 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:28,087 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:28,089 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-61416214/hregion-61416214.1733149048071 2024-12-02T14:17:28,089 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:28,090 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 9307df2fefcf228779bafda51b236a2c, NAME => 'testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741893_1071 (size=67) 2024-12-02T14:17:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741893_1071 (size=67) 2024-12-02T14:17:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741893_1071 (size=67) 2024-12-02T14:17:28,099 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:28,100 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,101 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName a 2024-12-02T14:17:28,102 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,102 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,102 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,103 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName b 2024-12-02T14:17:28,103 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,104 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,104 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,105 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName c 2024-12-02T14:17:28,105 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,106 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,106 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,106 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,107 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,108 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,108 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,109 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:28,110 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,112 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:28,112 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 9307df2fefcf228779bafda51b236a2c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62765927, jitterRate=-0.06471480429172516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 9307df2fefcf228779bafda51b236a2c: Writing region info on filesystem at 1733149048099Initializing all the Stores at 1733149048100 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048100Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048100Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048100Cleaning up temporary data from old regions at 1733149048108 (+8 ms)Region opened successfully at 1733149048113 (+5 ms) 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 9307df2fefcf228779bafda51b236a2c, disabling compactions & flushes 2024-12-02T14:17:28,113 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 
after waiting 0 ms 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,113 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,113 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 9307df2fefcf228779bafda51b236a2c: Waiting for close lock at 1733149048113Disabling compacts and flushes for region at 1733149048113Disabling writes for close at 1733149048113Writing region close event to WAL at 1733149048113Closed at 1733149048113 2024-12-02T14:17:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741892_1070 (size=95) 2024-12-02T14:17:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741892_1070 (size=95) 2024-12-02T14:17:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741892_1070 (size=95) 2024-12-02T14:17:28,118 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:28,118 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-61416214:(num 1733149048071) 2024-12-02T14:17:28,118 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:28,120 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:28,133 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, exclude list is [], retry=0 2024-12-02T14:17:28,135 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:28,135 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:28,135 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:28,137 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 2024-12-02T14:17:28,138 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:28,139 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 9307df2fefcf228779bafda51b236a2c, NAME => 'testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:28,139 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:28,139 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,139 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,140 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,141 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName a 2024-12-02T14:17:28,141 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,142 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,142 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,142 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
9307df2fefcf228779bafda51b236a2c columnFamilyName b 2024-12-02T14:17:28,143 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,143 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,143 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,144 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName c 2024-12-02T14:17:28,144 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,144 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,144 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,145 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,147 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,148 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,148 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,148 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
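[Editor's note] The run above (instantiating an AsyncFSWALProvider, creating region 9307df2fefcf228779bafda51b236a2c with column families 'a', 'b' and 'c', closing it, then reopening it over a fresh WAL with tag and GZ value compression) corresponds roughly to the test-scope setup sketched below. This is a minimal sketch assuming HBase 2.x APIs: the configuration property names for WAL value compression and the exact WALFactory/createHRegion signatures are assumptions and differ between releases; it is not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReplaySetupSketch {
  public static HRegion openTestRegion(Path rootDir) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ask for the async WAL provider plus WAL tag/value compression, which is what the
    // log reports (AsyncFSWALProvider, hasValueCompression=true, valueCompressionType=GZ).
    // The property names below are assumptions based on HBase 2.x; verify against your release.
    conf.set("hbase.wal.provider", "asyncfs");
    conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
    conf.setBoolean("hbase.regionserver.wal.value.compression", true);

    TableName tn = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn);
    // Three column families with VERSIONS => '1', matching the 'a', 'b', 'c' stores above.
    for (String cf : new String[] { "a", "b", "c" }) {
      tdb.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf)).setMaxVersions(1).build());
    }
    TableDescriptor td = tdb.build();

    RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build();
    // One WALFactory per "server"; getWAL(..) hands back the WAL instance the region writes to.
    WALFactory walFactory = new WALFactory(conf, "wal-replay-sketch");
    WAL wal = walFactory.getWAL(ri);
    // Creates the region directory under rootDir and opens the region over that WAL.
    return HRegion.createHRegion(ri, rootDir, conf, td, wal);
  }
}
```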
2024-12-02T14:17:28,150 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,150 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 9307df2fefcf228779bafda51b236a2c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73892020, jitterRate=0.10107690095901489}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:28,151 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 9307df2fefcf228779bafda51b236a2c: Writing region info on filesystem at 1733149048139Initializing all the Stores at 1733149048140 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048140Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048140Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048140Cleaning up temporary data from old regions at 1733149048148 (+8 ms)Region opened successfully at 1733149048151 (+3 ms) 2024-12-02T14:17:28,179 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9307df2fefcf228779bafda51b236a2c 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-02T14:17:28,215 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/a/a963e276fcf147b7a2f810502f72477e is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733149048151/Put/seqid=0 2024-12-02T14:17:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741895_1073 (size=5958) 2024-12-02T14:17:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741895_1073 (size=5958) 2024-12-02T14:17:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741895_1073 (size=5958) 2024-12-02T14:17:28,228 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/a/a963e276fcf147b7a2f810502f72477e 2024-12-02T14:17:28,255 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/9e55b93aacbd4cef98944366c0d632bb is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733149048159/Put/seqid=0 2024-12-02T14:17:28,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741896_1074 (size=5958) 2024-12-02T14:17:28,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741896_1074 (size=5958) 2024-12-02T14:17:28,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741896_1074 (size=5958) 2024-12-02T14:17:28,264 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/9e55b93aacbd4cef98944366c0d632bb 2024-12-02T14:17:28,284 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/c/e111de4dca634596ba9476132cf1c902 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733149048166/Put/seqid=0 2024-12-02T14:17:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741897_1075 (size=5958) 2024-12-02T14:17:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741897_1075 (size=5958) 2024-12-02T14:17:28,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741897_1075 (size=5958) 2024-12-02T14:17:28,298 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/c/e111de4dca634596ba9476132cf1c902 2024-12-02T14:17:28,305 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/a/a963e276fcf147b7a2f810502f72477e as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/a/a963e276fcf147b7a2f810502f72477e 2024-12-02T14:17:28,310 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/a/a963e276fcf147b7a2f810502f72477e, entries=10, sequenceid=33, filesize=5.8 K 2024-12-02T14:17:28,311 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/9e55b93aacbd4cef98944366c0d632bb as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/9e55b93aacbd4cef98944366c0d632bb 2024-12-02T14:17:28,316 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/9e55b93aacbd4cef98944366c0d632bb, entries=10, sequenceid=33, filesize=5.8 K 2024-12-02T14:17:28,317 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/c/e111de4dca634596ba9476132cf1c902 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/c/e111de4dca634596ba9476132cf1c902 2024-12-02T14:17:28,322 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/c/e111de4dca634596ba9476132cf1c902, entries=10, sequenceid=33, filesize=5.8 K 2024-12-02T14:17:28,324 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 9307df2fefcf228779bafda51b236a2c in 145ms, sequenceid=33, compaction requested=false 2024-12-02T14:17:28,324 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9307df2fefcf228779bafda51b236a2c: 2024-12-02T14:17:28,324 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 9307df2fefcf228779bafda51b236a2c, disabling compactions & flushes 2024-12-02T14:17:28,324 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,324 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,324 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. after waiting 0 ms 2024-12-02T14:17:28,324 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 2024-12-02T14:17:28,325 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 
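[Editor's note] The flush above turns each family's memstore into one ~5.8 K HFile at sequenceid=33, after which the region is closed. A sketch of the kind of write-and-flush driver that produces such a flush is shown below, again assuming HBase 2.x test-scope APIs (HRegion.put and HRegion.flush(boolean); the flush signature varies across versions). The row, qualifiers and values are illustrative, not the test's exact data.

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public class WalReplayWriteSketch {
  // Write a handful of cells into each of the three families and force a flush,
  // which is what yields the three per-family HFiles seen in the log above.
  public static void writeAndFlush(HRegion region) throws Exception {
    byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion");
    for (String cf : new String[] { "a", "b", "c" }) {
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes(cf), Bytes.toBytes("x" + i), Bytes.toBytes("value" + i));
        region.put(put);
      }
    }
    // Flush all stores; each family's memstore becomes an HFile under <region>/<cf>/.
    region.flush(true);
  }
}
```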
2024-12-02T14:17:28,326 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 9307df2fefcf228779bafda51b236a2c: Waiting for close lock at 1733149048324Disabling compacts and flushes for region at 1733149048324Disabling writes for close at 1733149048324Writing region close event to WAL at 1733149048325 (+1 ms)Closed at 1733149048325 2024-12-02T14:17:28,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741894_1072 (size=3386) 2024-12-02T14:17:28,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741894_1072 (size=3386) 2024-12-02T14:17:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741894_1072 (size=3386) 2024-12-02T14:17:28,335 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/9e55b93aacbd4cef98944366c0d632bb to hdfs://localhost:42525/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/9e55b93aacbd4cef98944366c0d632bb 2024-12-02T14:17:28,360 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, size=3.3 K (3386bytes) 2024-12-02T14:17:28,361 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 2024-12-02T14:17:28,361 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 after 0ms 2024-12-02T14:17:28,364 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:28,364 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 took 4ms 2024-12-02T14:17:28,367 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 so closing down 2024-12-02T14:17:28,367 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:28,368 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733149048120.temp 2024-12-02T14:17:28,369 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp 2024-12-02T14:17:28,372 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:28,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45771 is added to blk_1073741898_1076 (size=2944) 2024-12-02T14:17:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741898_1076 (size=2944) 2024-12-02T14:17:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741898_1076 (size=2944) 2024-12-02T14:17:28,390 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:28,393 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 2024-12-02T14:17:28,393 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 29 ms; skipped=2; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, size=3.3 K, length=3386, corrupted=false, cancelled=false 2024-12-02T14:17:28,393 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, journal: Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, size=3.3 K (3386bytes) at 1733149048361Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 so closing down at 1733149048367 (+6 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp at 1733149048369 (+2 ms)3 split writer threads finished at 1733149048372 (+3 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733149048390 (+18 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000003-wal.1733149048120.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 at 1733149048393 (+3 ms)Processed 32 edits across 1 Regions in 29 ms; skipped=2; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120, size=3.3 K, length=3386, corrupted=false, cancelled=false at 1733149048393 2024-12-02T14:17:28,396 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048120 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149048120 2024-12-02T14:17:28,397 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 2024-12-02T14:17:28,397 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:28,399 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:28,421 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048400, exclude list is [], retry=0 2024-12-02T14:17:28,426 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:28,427 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:28,428 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:28,438 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048400 2024-12-02T14:17:28,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:28,439 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 9307df2fefcf228779bafda51b236a2c, NAME => 'testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:28,439 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:28,439 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,439 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,442 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,443 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName a 2024-12-02T14:17:28,443 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,451 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/a/a963e276fcf147b7a2f810502f72477e 2024-12-02T14:17:28,451 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,451 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,453 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName b 2024-12-02T14:17:28,453 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,453 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,453 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,454 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9307df2fefcf228779bafda51b236a2c columnFamilyName c 2024-12-02T14:17:28,455 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741834_1010 (size=1404) 2024-12-02T14:17:28,473 DEBUG [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/c/e111de4dca634596ba9476132cf1c902 2024-12-02T14:17:28,473 INFO [StoreOpener-9307df2fefcf228779bafda51b236a2c-1 {}] regionserver.HStore(327): Store=9307df2fefcf228779bafda51b236a2c/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,473 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,474 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,475 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,475 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 2024-12-02T14:17:28,477 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:28,479 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 2024-12-02T14:17:28,480 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9307df2fefcf228779bafda51b236a2c 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-02T14:17:28,495 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/a4b5884d7c274608b840731169351092 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733149048159/Put/seqid=0 2024-12-02T14:17:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741900_1078 (size=5958) 2024-12-02T14:17:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741900_1078 (size=5958) 2024-12-02T14:17:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741900_1078 (size=5958) 2024-12-02T14:17:28,502 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/a4b5884d7c274608b840731169351092 2024-12-02T14:17:28,509 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/.tmp/b/a4b5884d7c274608b840731169351092 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/a4b5884d7c274608b840731169351092 2024-12-02T14:17:28,514 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/b/a4b5884d7c274608b840731169351092, entries=10, sequenceid=32, filesize=5.8 K 2024-12-02T14:17:28,515 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 9307df2fefcf228779bafda51b236a2c in 36ms, sequenceid=32, compaction requested=false; wal=null 2024-12-02T14:17:28,515 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/0000000000000000032 2024-12-02T14:17:28,516 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,517 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,517 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
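The messages above record the recovered-edits replay for region 9307df2fefcf228779bafda51b236a2c: the reader opens recovered.edits/0000000000000000032 with GZ value compression, applies 10 edits and skips 20 ("firstSequenceIdInLog=3, maxSequenceIdInLog=32"), flushes the re-applied cells to .tmp/b/a4b5884d7c274608b840731169351092, commits that file into the b store, and deletes the recovered-edits file. The following is a minimal, self-contained sketch of the apply-vs-skip rule implied by those counters; WalEdit and MemStoreSink are invented stand-ins used only for illustration, not HBase APIs, and this is not the actual HRegion replay code.

import java.util.List;

final class RecoveredEditsReplaySketch {
    /** Hypothetical WAL entry: a sequence id plus an opaque payload. */
    record WalEdit(long sequenceId, byte[] payload) {}

    /** Hypothetical sink standing in for the region's memstore. */
    interface MemStoreSink { void apply(WalEdit edit); }

    /**
     * Replays recovered edits: anything at or below the highest sequence id already
     * persisted in store files is skipped, everything newer is re-applied.
     * Returns the highest sequence id seen in the log file.
     */
    static long replay(List<WalEdit> edits, long maxFlushedSequenceId, MemStoreSink sink) {
        long applied = 0, skipped = 0, maxSeqIdInLog = -1;
        for (WalEdit edit : edits) {
            maxSeqIdInLog = Math.max(maxSeqIdInLog, edit.sequenceId());
            if (edit.sequenceId() <= maxFlushedSequenceId) {
                skipped++;            // already durable in a store file, nothing to do
            } else {
                sink.apply(edit);     // re-insert into the memstore
                applied++;
            }
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
            applied, skipped, maxSeqIdInLog);
        return maxSeqIdInLog;
    }
}

A caller would flush the memstore after replay and persist the returned value plus one, which matches the recovered.edits/33.seqid file with newMaxSeqId=33 written in the next messages, so that a later open knows nothing older needs replaying.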
2024-12-02T14:17:28,519 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 9307df2fefcf228779bafda51b236a2c 2024-12-02T14:17:28,521 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/9307df2fefcf228779bafda51b236a2c/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-02T14:17:28,522 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 9307df2fefcf228779bafda51b236a2c; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63434329, jitterRate=-0.054754838347435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:28,523 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 9307df2fefcf228779bafda51b236a2c: Writing region info on filesystem at 1733149048440Initializing all the Stores at 1733149048441 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048441Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048442 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048442Obtaining lock to block concurrent updates at 1733149048480 (+38 ms)Preparing flush snapshotting stores in 9307df2fefcf228779bafda51b236a2c at 1733149048480Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733149048480Flushing stores of testReplayEditsWrittenViaHRegion,,1733149048067.9307df2fefcf228779bafda51b236a2c. 
at 1733149048480Flushing 9307df2fefcf228779bafda51b236a2c/b: creating writer at 1733149048480Flushing 9307df2fefcf228779bafda51b236a2c/b: appending metadata at 1733149048494 (+14 ms)Flushing 9307df2fefcf228779bafda51b236a2c/b: closing flushed file at 1733149048494Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f6c040: reopening flushed file at 1733149048508 (+14 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 9307df2fefcf228779bafda51b236a2c in 36ms, sequenceid=32, compaction requested=false; wal=null at 1733149048515 (+7 ms)Cleaning up temporary data from old regions at 1733149048517 (+2 ms)Region opened successfully at 1733149048523 (+6 ms) 2024-12-02T14:17:28,550 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=414 (was 405) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:55690 [Waiting for operation #39] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:46492 [Waiting for operation #26] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:57232 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:44384 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:50640 [Waiting for operation #37] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:35474 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1112 (was 1036) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 303), ProcessCount=11 (was 11), AvailableMemoryMB=4366 (was 4388) 2024-12-02T14:17:28,550 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1112 is superior to 1024 2024-12-02T14:17:28,564 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=414, OpenFileDescriptor=1112, MaxFileDescriptor=1048576, SystemLoadAverage=303, ProcessCount=11, AvailableMemoryMB=4365 2024-12-02T14:17:28,564 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1112 is superior to 1024 2024-12-02T14:17:28,580 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:28,582 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:28,583 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:28,585 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-33763863, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-33763863, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:28,598 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-33763863/hregion-33763863.1733149048585, exclude list is [], retry=0 2024-12-02T14:17:28,601 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:28,601 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:28,602 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:28,613 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-33763863/hregion-33763863.1733149048585 2024-12-02T14:17:28,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:28,613 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2dea028465b2d0820cb1bf13af145dba, NAME => 'testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:28,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741902_1080 (size=68) 2024-12-02T14:17:28,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741902_1080 (size=68) 2024-12-02T14:17:28,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741902_1080 (size=68) 2024-12-02T14:17:28,624 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:28,625 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,627 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName a 2024-12-02T14:17:28,627 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,627 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,628 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,629 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName b 2024-12-02T14:17:28,629 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,630 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,630 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,631 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName c 2024-12-02T14:17:28,631 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,632 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,632 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,633 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,633 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,634 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,634 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,635 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:28,636 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,640 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:28,641 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2dea028465b2d0820cb1bf13af145dba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68390750, jitterRate=0.019101589918136597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:28,641 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2dea028465b2d0820cb1bf13af145dba: Writing region info on filesystem at 1733149048624Initializing all the Stores at 1733149048625 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048625Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048625Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048625Cleaning up temporary data from old regions at 1733149048634 (+9 ms)Region opened successfully at 1733149048641 (+7 ms) 2024-12-02T14:17:28,641 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2dea028465b2d0820cb1bf13af145dba, disabling compactions & flushes 2024-12-02T14:17:28,641 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:28,641 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:28,642 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 
after waiting 0 ms 2024-12-02T14:17:28,642 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:28,642 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:28,642 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2dea028465b2d0820cb1bf13af145dba: Waiting for close lock at 1733149048641Disabling compacts and flushes for region at 1733149048641Disabling writes for close at 1733149048642 (+1 ms)Writing region close event to WAL at 1733149048642Closed at 1733149048642 2024-12-02T14:17:28,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741901_1079 (size=95) 2024-12-02T14:17:28,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741901_1079 (size=95) 2024-12-02T14:17:28,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741901_1079 (size=95) 2024-12-02T14:17:28,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:28,646 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-33763863:(num 1733149048585) 2024-12-02T14:17:28,647 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:28,648 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:28,663 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, exclude list is [], retry=0 2024-12-02T14:17:28,665 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:28,665 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:28,666 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:28,668 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 2024-12-02T14:17:28,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:28,729 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 2dea028465b2d0820cb1bf13af145dba, NAME => 'testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:28,732 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,732 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:28,732 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,732 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,735 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,736 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName a 2024-12-02T14:17:28,736 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,736 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,737 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,738 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName b 2024-12-02T14:17:28,738 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,738 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,738 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,739 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName c 2024-12-02T14:17:28,739 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:28,740 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:28,740 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,741 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,742 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,743 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,743 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,743 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:28,745 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,746 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2dea028465b2d0820cb1bf13af145dba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60810255, jitterRate=-0.09385658800601959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:28,746 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:28,747 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2dea028465b2d0820cb1bf13af145dba: Running coprocessor pre-open hook at 1733149048733Writing region info on filesystem at 1733149048733Initializing all the Stores at 1733149048734 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048734Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048734Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149048734Cleaning up temporary data from old regions at 1733149048743 (+9 ms)Running coprocessor post-open hooks at 1733149048746 (+3 ms)Region opened successfully at 1733149048747 (+1 ms) 2024-12-02T14:17:28,762 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2dea028465b2d0820cb1bf13af145dba 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-02T14:17:28,763 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
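The stack trace above originates in the test's own flusher, AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot, which throws "Simulated exception by tests" so that the flush of 2dea028465b2d0820cb1bf13af145dba aborts and the edits must later be recovered from the WAL; HStore catches the failure and logs "Failed flushing store file ... retrying num=0". A hedged sketch of that test pattern follows: a flusher whose failure is controlled by a switch. The names ToggleableFlusherSketch and THROW_ON_FLUSH are invented for illustration and are not the actual members of AbstractTestWALReplay.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

final class ToggleableFlusherSketch {
    /** Test-controlled switch: while true, every flush attempt fails. */
    static final AtomicBoolean THROW_ON_FLUSH = new AtomicBoolean(true);

    /** Stand-in for a store flusher's flushSnapshot(): fails on demand, succeeds otherwise. */
    List<String> flushSnapshot(List<String> snapshotCells) throws IOException {
        if (THROW_ON_FLUSH.get()) {
            throw new IOException("Simulated exception by tests");
        }
        // Normally the snapshot would be written to a new store file; here the
        // "flushed" cells are simply returned to keep the sketch self-contained.
        return List.copyOf(snapshotCells);
    }
}

While the switch is on, every flushSnapshot call fails exactly as in the trace; turning it off lets a later flush attempt succeed.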
2024-12-02T14:17:29,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-02T14:17:29,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:29,238 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-02T14:17:29,238 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:29,239 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-02T14:17:29,239 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:29,241 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-02T14:17:29,241 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:29,242 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-02T14:17:29,242 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:29,764 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:30,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T14:17:30,764 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:17:31,765 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:32,766 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:33,766 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:34,767 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:17:35,767 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:36,768 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:37,769 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 2dea028465b2d0820cb1bf13af145dba/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:37,770 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2dea028465b2d0820cb1bf13af145dba: 2024-12-02T14:17:37,770 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,783 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2dea028465b2d0820cb1bf13af145dba: 2024-12-02T14:17:37,783 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 
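The block of near-identical WARN stack traces above is the flush retry loop: the test's AbstractTestWALReplay$CustomStoreFlusher throws "Simulated exception by tests" on every flushSnapshot call, and the store retries roughly once per second (the WARN timestamps are about a second apart, retrying num=1 through num=9) before the test accepts the expected failure. The self-contained Java sketch below reproduces only that retry pattern; the class and method names are hypothetical stand-ins for illustration, not the actual HBase or test code.

```java
import java.io.IOException;

/**
 * Minimal illustration of the retry pattern visible in the log:
 * a flusher that always throws a simulated IOException, and a caller
 * that retries a bounded number of times with a fixed pause.
 * All names here are hypothetical, not HBase API.
 */
public class SimulatedFlushRetryDemo {

    /** Stand-in for a test-only store flusher that always fails. */
    static void flushSnapshot() throws IOException {
        throw new IOException("Simulated exception by tests");
    }

    public static void main(String[] args) throws InterruptedException {
        final int maxAttempts = 10;     // log shows retry numbers climbing to num=9
        final long pauseMillis = 1000L; // WARN lines are roughly one second apart

        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                flushSnapshot();
                System.out.println("Flush succeeded on attempt " + attempt);
                return;
            } catch (IOException e) {
                System.out.println("Failed flushing store file, retrying num=" + attempt
                        + " (" + e.getMessage() + ")");
                Thread.sleep(pauseMillis);
            }
        }
        System.out.println("All " + maxAttempts + " flush attempts failed, as the test expects.");
    }
}
```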
2024-12-02T14:17:37,783 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2dea028465b2d0820cb1bf13af145dba, disabling compactions & flushes 2024-12-02T14:17:37,783 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,784 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,784 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. after waiting 0 ms 2024-12-02T14:17:37,784 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,784 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,784 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. 2024-12-02T14:17:37,784 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2dea028465b2d0820cb1bf13af145dba: Waiting for close lock at 1733149057783Running coprocessor pre-close hooks at 1733149057783Disabling compacts and flushes for region at 1733149057783Disabling writes for close at 1733149057784 (+1 ms)Writing region close event to WAL at 1733149057784Running coprocessor post-close hooks at 1733149057784Closed at 1733149057784 2024-12-02T14:17:37,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741903_1081 (size=2691) 2024-12-02T14:17:37,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741903_1081 (size=2691) 2024-12-02T14:17:37,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741903_1081 (size=2691) 2024-12-02T14:17:37,804 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, size=2.6 K (2691bytes) 2024-12-02T14:17:37,804 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 2024-12-02T14:17:37,804 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 after 0ms 2024-12-02T14:17:37,807 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:37,807 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 took 4ms 2024-12-02T14:17:37,809 DEBUG 
[Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 so closing down 2024-12-02T14:17:37,809 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:37,811 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733149048649.temp 2024-12-02T14:17:37,812 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp 2024-12-02T14:17:37,813 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:37,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741904_1082 (size=2094) 2024-12-02T14:17:37,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741904_1082 (size=2094) 2024-12-02T14:17:37,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741904_1082 (size=2094) 2024-12-02T14:17:37,823 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:37,825 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 2024-12-02T14:17:37,825 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-12-02T14:17:37,825 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, journal: Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, size=2.6 K (2691bytes) at 1733149057804Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 so closing down at 1733149057809 (+5 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp at 1733149057812 (+3 ms)3 split writer threads finished at 1733149057813 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733149057823 (+10 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000004-wal.1733149048649.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 at 1733149057825 (+2 ms)Processed 23 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1733149057825 2024-12-02T14:17:37,827 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149048649 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149048649 2024-12-02T14:17:37,827 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 2024-12-02T14:17:37,828 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:37,829 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:37,844 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149057830, exclude list is [], retry=0 2024-12-02T14:17:37,847 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:37,847 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:37,847 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:37,850 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149057830 2024-12-02T14:17:37,850 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:37,851 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2dea028465b2d0820cb1bf13af145dba, NAME => 'testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.', 
STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:37,851 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,852 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:37,852 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,852 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,854 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,855 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName a 2024-12-02T14:17:37,855 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:37,856 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:37,856 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,857 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName b 2024-12-02T14:17:37,857 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:37,858 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:37,858 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,858 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dea028465b2d0820cb1bf13af145dba columnFamilyName c 2024-12-02T14:17:37,859 DEBUG [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:37,859 INFO [StoreOpener-2dea028465b2d0820cb1bf13af145dba-1 {}] regionserver.HStore(327): Store=2dea028465b2d0820cb1bf13af145dba/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:37,859 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,860 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,861 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,862 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 2024-12-02T14:17:37,864 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-12-02T14:17:37,866 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 2024-12-02T14:17:37,866 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2dea028465b2d0820cb1bf13af145dba 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-02T14:17:37,882 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/a/b45c11315db34e04a51a59acf475c284 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733149057775/Put/seqid=0 2024-12-02T14:17:37,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741906_1084 (size=5523) 2024-12-02T14:17:37,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741906_1084 (size=5523) 2024-12-02T14:17:37,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741906_1084 (size=5523) 2024-12-02T14:17:37,890 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/a/b45c11315db34e04a51a59acf475c284 2024-12-02T14:17:37,911 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/b/03ed13e8636f4f2f8be55b4e8e613048 is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733149057770/Put/seqid=0 2024-12-02T14:17:37,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
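The WAL split a few entries back follows a write-then-rename pattern: edits are first written to a temp file named after the first sequence id plus the source WAL (0000000000000000004-wal.1733149048649.temp) and, once the writer closes, the file is renamed to the zero-padded maximum sequence id (0000000000000000026), which is where the reopening region later finds it under recovered.edits. The sketch below mimics only that naming-and-rename step on the local filesystem as a rough illustration; the 19-digit padding is inferred from the file names in the log, and the helper class is hypothetical rather than HBase's own splitter code.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * Illustrative sketch of the recovered-edits naming seen in the log:
 * write to "<firstSeqId>-<walName>.temp", then rename to the zero-padded
 * max sequence id once all edits are written. Local filesystem only.
 */
public class RecoveredEditsNamingDemo {

    // 19-digit zero padding, inferred from names like 0000000000000000026 in the log.
    static String formatSeqId(long seqId) {
        return String.format("%019d", seqId);
    }

    public static void main(String[] args) throws IOException {
        Path recoveredEditsDir = Files.createTempDirectory("recovered.edits");
        long firstSeqId = 4L;                 // firstSequenceIdInLog=4 in the log
        long maxSeqId = 26L;                  // maxSequenceIdInLog=26 in the log
        String walName = "wal.1733149048649"; // source WAL name from the log

        // 1. Write edits into a temp file named after the first sequence id and the source WAL.
        Path temp = recoveredEditsDir.resolve(formatSeqId(firstSeqId) + "-" + walName + ".temp");
        Files.writeString(temp, "20 edits would be serialized here");

        // 2. Once the writer is closed, rename the temp file to the max sequence id.
        Path finalName = recoveredEditsDir.resolve(formatSeqId(maxSeqId));
        Files.move(temp, finalName);

        System.out.println("Recovered edits available at: " + finalName);
    }
}
```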
2024-12-02T14:17:37,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741907_1085 (size=5524) 2024-12-02T14:17:37,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741907_1085 (size=5524) 2024-12-02T14:17:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741907_1085 (size=5524) 2024-12-02T14:17:37,918 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/b/03ed13e8636f4f2f8be55b4e8e613048 2024-12-02T14:17:37,946 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/c/93d539bc27864e03abbd73c82c076e93 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733149057773/Put/seqid=0 2024-12-02T14:17:37,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741908_1086 (size=5457) 2024-12-02T14:17:37,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741908_1086 (size=5457) 2024-12-02T14:17:37,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741908_1086 (size=5457) 2024-12-02T14:17:37,954 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/c/93d539bc27864e03abbd73c82c076e93 2024-12-02T14:17:37,960 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/a/b45c11315db34e04a51a59acf475c284 as hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/a/b45c11315db34e04a51a59acf475c284 2024-12-02T14:17:37,966 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/a/b45c11315db34e04a51a59acf475c284, entries=7, sequenceid=26, filesize=5.4 K 2024-12-02T14:17:37,967 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/b/03ed13e8636f4f2f8be55b4e8e613048 as hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/b/03ed13e8636f4f2f8be55b4e8e613048 2024-12-02T14:17:37,974 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/b/03ed13e8636f4f2f8be55b4e8e613048, entries=7, sequenceid=26, filesize=5.4 K 2024-12-02T14:17:37,975 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/.tmp/c/93d539bc27864e03abbd73c82c076e93 as hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/c/93d539bc27864e03abbd73c82c076e93 2024-12-02T14:17:37,981 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/c/93d539bc27864e03abbd73c82c076e93, entries=6, sequenceid=26, filesize=5.3 K 2024-12-02T14:17:37,981 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 2dea028465b2d0820cb1bf13af145dba in 115ms, sequenceid=26, compaction requested=false; wal=null 2024-12-02T14:17:37,982 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/0000000000000000026 2024-12-02T14:17:37,983 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,983 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,984 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:37,985 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,987 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsAfterAbortingFlush/2dea028465b2d0820cb1bf13af145dba/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-02T14:17:37,989 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2dea028465b2d0820cb1bf13af145dba; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62963299, jitterRate=-0.06177373230457306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:37,989 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2dea028465b2d0820cb1bf13af145dba 2024-12-02T14:17:37,990 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2dea028465b2d0820cb1bf13af145dba: Running coprocessor pre-open hook at 1733149057852Writing region info on filesystem at 1733149057852Initializing all the Stores at 1733149057853 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149057853Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1733149057853Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149057853Obtaining lock to block concurrent updates at 1733149057866 (+13 ms)Preparing flush snapshotting stores in 2dea028465b2d0820cb1bf13af145dba at 1733149057866Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733149057866Flushing stores of testReplayEditsAfterAbortingFlush,,1733149048581.2dea028465b2d0820cb1bf13af145dba. at 1733149057866Flushing 2dea028465b2d0820cb1bf13af145dba/a: creating writer at 1733149057866Flushing 2dea028465b2d0820cb1bf13af145dba/a: appending metadata at 1733149057882 (+16 ms)Flushing 2dea028465b2d0820cb1bf13af145dba/a: closing flushed file at 1733149057882Flushing 2dea028465b2d0820cb1bf13af145dba/b: creating writer at 1733149057896 (+14 ms)Flushing 2dea028465b2d0820cb1bf13af145dba/b: appending metadata at 1733149057911 (+15 ms)Flushing 2dea028465b2d0820cb1bf13af145dba/b: closing flushed file at 1733149057911Flushing 2dea028465b2d0820cb1bf13af145dba/c: creating writer at 1733149057924 (+13 ms)Flushing 2dea028465b2d0820cb1bf13af145dba/c: appending metadata at 1733149057945 (+21 ms)Flushing 2dea028465b2d0820cb1bf13af145dba/c: closing flushed file at 1733149057945Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@213635cf: reopening flushed file at 1733149057960 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26e81d17: reopening flushed file at 1733149057967 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6da719ff: reopening flushed file at 1733149057974 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 2dea028465b2d0820cb1bf13af145dba in 115ms, sequenceid=26, compaction requested=false; wal=null at 1733149057981 (+7 ms)Cleaning up temporary data from old regions at 1733149057983 (+2 ms)Running coprocessor post-open hooks at 1733149057989 (+6 ms)Region opened successfully at 1733149057990 (+1 ms) 2024-12-02T14:17:38,014 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=412 (was 414), OpenFileDescriptor=1170 (was 1112) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=294 (was 303), ProcessCount=11 (was 11), AvailableMemoryMB=4334 (was 4365) 2024-12-02T14:17:38,014 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1170 is superior to 1024 2024-12-02T14:17:38,027 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=412, OpenFileDescriptor=1170, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=4333 2024-12-02T14:17:38,027 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1170 is superior to 1024 2024-12-02T14:17:38,043 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:38,045 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:38,046 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:38,048 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-43430944, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-43430944, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:38,061 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-43430944/hregion-43430944.1733149058049, exclude list is [], retry=0 2024-12-02T14:17:38,064 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:38,064 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:38,064 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:38,066 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-43430944/hregion-43430944.1733149058049 2024-12-02T14:17:38,067 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:38,067 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d364f03a4f8e5646bfb8875eeed3d3b6, NAME => 'testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:38,076 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741910_1088 (size=61) 2024-12-02T14:17:38,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741910_1088 (size=61) 2024-12-02T14:17:38,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741910_1088 (size=61) 2024-12-02T14:17:38,077 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,078 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,079 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d364f03a4f8e5646bfb8875eeed3d3b6 columnFamilyName a 2024-12-02T14:17:38,079 DEBUG [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,080 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(327): Store=d364f03a4f8e5646bfb8875eeed3d3b6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,080 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,081 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,081 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,081 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,081 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,083 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,085 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:38,085 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d364f03a4f8e5646bfb8875eeed3d3b6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59912073, jitterRate=-0.10724054276943207}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:38,086 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d364f03a4f8e5646bfb8875eeed3d3b6: Writing region info on filesystem at 1733149058077Initializing all the Stores at 1733149058078 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058078Cleaning up temporary data from old regions at 1733149058081 (+3 ms)Region opened successfully at 1733149058086 (+5 ms) 2024-12-02T14:17:38,086 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d364f03a4f8e5646bfb8875eeed3d3b6, disabling compactions & flushes 2024-12-02T14:17:38,086 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,086 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,086 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. after waiting 0 ms 2024-12-02T14:17:38,086 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,087 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 
2024-12-02T14:17:38,087 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d364f03a4f8e5646bfb8875eeed3d3b6: Waiting for close lock at 1733149058086Disabling compacts and flushes for region at 1733149058086Disabling writes for close at 1733149058086Writing region close event to WAL at 1733149058086Closed at 1733149058087 (+1 ms) 2024-12-02T14:17:38,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741909_1087 (size=95) 2024-12-02T14:17:38,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741909_1087 (size=95) 2024-12-02T14:17:38,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741909_1087 (size=95) 2024-12-02T14:17:38,094 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:38,094 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-43430944:(num 1733149058049) 2024-12-02T14:17:38,094 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:38,096 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:38,109 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, exclude list is [], retry=0 2024-12-02T14:17:38,112 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:38,112 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:38,113 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:38,115 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 2024-12-02T14:17:38,115 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:38,115 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d364f03a4f8e5646bfb8875eeed3d3b6, NAME => 'testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:38,115 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,115 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,115 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,117 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,118 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d364f03a4f8e5646bfb8875eeed3d3b6 columnFamilyName a 2024-12-02T14:17:38,118 DEBUG [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,118 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(327): Store=d364f03a4f8e5646bfb8875eeed3d3b6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,118 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,119 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,120 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,120 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,120 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,122 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,123 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d364f03a4f8e5646bfb8875eeed3d3b6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60531585, jitterRate=-0.09800909459590912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:38,124 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
d364f03a4f8e5646bfb8875eeed3d3b6: Writing region info on filesystem at 1733149058115Initializing all the Stores at 1733149058116 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058116Cleaning up temporary data from old regions at 1733149058120 (+4 ms)Region opened successfully at 1733149058123 (+3 ms) 2024-12-02T14:17:38,133 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d364f03a4f8e5646bfb8875eeed3d3b6, disabling compactions & flushes 2024-12-02T14:17:38,133 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,133 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,134 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. after waiting 0 ms 2024-12-02T14:17:38,134 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,134 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 2024-12-02T14:17:38,134 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 
2024-12-02T14:17:38,134 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d364f03a4f8e5646bfb8875eeed3d3b6: Waiting for close lock at 1733149058133Disabling compacts and flushes for region at 1733149058133Disabling writes for close at 1733149058134 (+1 ms)Writing region close event to WAL at 1733149058134Closed at 1733149058134 2024-12-02T14:17:38,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741911_1089 (size=1050) 2024-12-02T14:17:38,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741911_1089 (size=1050) 2024-12-02T14:17:38,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741911_1089 (size=1050) 2024-12-02T14:17:38,157 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, size=1.0 K (1050bytes) 2024-12-02T14:17:38,157 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 2024-12-02T14:17:38,157 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 after 0ms 2024-12-02T14:17:38,160 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:38,160 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 took 4ms 2024-12-02T14:17:38,166 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 so closing down 2024-12-02T14:17:38,166 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:38,167 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733149058096.temp 2024-12-02T14:17:38,169 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp 2024-12-02T14:17:38,169 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741912_1090 (size=1050) 2024-12-02T14:17:38,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741912_1090 (size=1050) 2024-12-02T14:17:38,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741912_1090 (size=1050) 2024-12-02T14:17:38,178 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:38,180 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp to hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 2024-12-02T14:17:38,180 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-12-02T14:17:38,180 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, journal: Splitting hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, size=1.0 K (1050bytes) at 1733149058157Finishing writing output for hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 so closing down at 1733149058166 (+9 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp at 1733149058169 (+3 ms)3 split writer threads finished at 1733149058169Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733149058178 (+9 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000003-wal.1733149058096.temp to hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 at 1733149058180 (+2 ms)Processed 10 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1733149058180 2024-12-02T14:17:38,181 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058096 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149058096 2024-12-02T14:17:38,182 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 2024-12-02T14:17:38,186 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 
2024-12-02T14:17:38,617 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:38,620 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:38,634 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058620, exclude list is [], retry=0 2024-12-02T14:17:38,636 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:38,637 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:38,637 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:38,639 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058620 2024-12-02T14:17:38,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:38,639 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d364f03a4f8e5646bfb8875eeed3d3b6, NAME => 'testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:38,639 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,639 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,639 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,643 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,645 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d364f03a4f8e5646bfb8875eeed3d3b6 columnFamilyName a 2024-12-02T14:17:38,645 DEBUG [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,646 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(327): Store=d364f03a4f8e5646bfb8875eeed3d3b6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,646 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,647 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,649 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,649 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 2024-12-02T14:17:38,651 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:38,652 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 2024-12-02T14:17:38,653 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d364f03a4f8e5646bfb8875eeed3d3b6 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-02T14:17:38,670 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/.tmp/a/1cb9b51087dd45e4ae0977e139a348a4 is 79, key is testDatalossWhenInputError/a:x0/1733149058124/Put/seqid=0 2024-12-02T14:17:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741914_1092 (size=5808) 2024-12-02T14:17:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741914_1092 (size=5808) 2024-12-02T14:17:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741914_1092 (size=5808) 2024-12-02T14:17:38,678 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 
(bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/.tmp/a/1cb9b51087dd45e4ae0977e139a348a4 2024-12-02T14:17:38,686 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/.tmp/a/1cb9b51087dd45e4ae0977e139a348a4 as hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/a/1cb9b51087dd45e4ae0977e139a348a4 2024-12-02T14:17:38,694 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/a/1cb9b51087dd45e4ae0977e139a348a4, entries=10, sequenceid=12, filesize=5.7 K 2024-12-02T14:17:38,694 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for d364f03a4f8e5646bfb8875eeed3d3b6 in 42ms, sequenceid=12, compaction requested=false; wal=null 2024-12-02T14:17:38,694 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/0000000000000000012 2024-12-02T14:17:38,695 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,695 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,698 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,700 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-02T14:17:38,701 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d364f03a4f8e5646bfb8875eeed3d3b6; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69636292, jitterRate=0.037661612033843994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:38,702 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d364f03a4f8e5646bfb8875eeed3d3b6: Writing region info on filesystem at 1733149058640Initializing all the Stores at 1733149058642 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058642Obtaining lock to block concurrent updates at 1733149058653 (+11 ms)Preparing flush snapshotting stores in d364f03a4f8e5646bfb8875eeed3d3b6 at 1733149058653Finished memstore snapshotting testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733149058653Flushing stores of testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6. 
at 1733149058653Flushing d364f03a4f8e5646bfb8875eeed3d3b6/a: creating writer at 1733149058653Flushing d364f03a4f8e5646bfb8875eeed3d3b6/a: appending metadata at 1733149058670 (+17 ms)Flushing d364f03a4f8e5646bfb8875eeed3d3b6/a: closing flushed file at 1733149058670Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56313810: reopening flushed file at 1733149058685 (+15 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for d364f03a4f8e5646bfb8875eeed3d3b6 in 42ms, sequenceid=12, compaction requested=false; wal=null at 1733149058694 (+9 ms)Cleaning up temporary data from old regions at 1733149058695 (+1 ms)Region opened successfully at 1733149058702 (+7 ms) 2024-12-02T14:17:38,705 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d364f03a4f8e5646bfb8875eeed3d3b6, NAME => 'testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:38,705 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733149058044.d364f03a4f8e5646bfb8875eeed3d3b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,705 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,705 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,706 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,707 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d364f03a4f8e5646bfb8875eeed3d3b6 columnFamilyName a 2024-12-02T14:17:38,707 DEBUG [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,713 DEBUG [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/a/1cb9b51087dd45e4ae0977e139a348a4 2024-12-02T14:17:38,713 INFO [StoreOpener-d364f03a4f8e5646bfb8875eeed3d3b6-1 {}] regionserver.HStore(327): Store=d364f03a4f8e5646bfb8875eeed3d3b6/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,713 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,714 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,715 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,715 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,715 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,717 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d364f03a4f8e5646bfb8875eeed3d3b6 2024-12-02T14:17:38,720 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testDatalossWhenInputError/d364f03a4f8e5646bfb8875eeed3d3b6/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-02T14:17:38,721 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d364f03a4f8e5646bfb8875eeed3d3b6; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69222648, jitterRate=0.031497836112976074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:17:38,721 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d364f03a4f8e5646bfb8875eeed3d3b6: Writing region info on filesystem at 1733149058705Initializing all the Stores at 1733149058706 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058706Cleaning up temporary data from old regions at 1733149058715 (+9 ms)Region opened successfully at 1733149058721 (+6 ms) 2024-12-02T14:17:38,743 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=422 (was 412) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:47792 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:47722 [Waiting for operation #26] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33570 [Waiting for operation 
#9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33682 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33550 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: 
BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1615503913-172.17.0.3-1733149025365:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1252 (was 1170) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=294 (was 294), ProcessCount=11 (was 11), AvailableMemoryMB=4255 (was 4333) 2024-12-02T14:17:38,743 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1252 is superior to 1024 2024-12-02T14:17:38,757 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=422, OpenFileDescriptor=1252, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=4250 2024-12-02T14:17:38,757 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1252 is superior to 1024 2024-12-02T14:17:38,779 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:38,782 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:38,783 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:38,785 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-22880848, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-22880848, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:38,806 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-22880848/hregion-22880848.1733149058786, exclude list is [], retry=0 2024-12-02T14:17:38,810 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:38,812 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:38,817 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:38,819 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-22880848/hregion-22880848.1733149058786 2024-12-02T14:17:38,820 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:38,820 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 10ff7749bd5b2c4d371ecd0333610d52, NAME => 'testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:38,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741916_1094 (size=63) 2024-12-02T14:17:38,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741916_1094 (size=63) 2024-12-02T14:17:38,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741916_1094 (size=63) 2024-12-02T14:17:38,856 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,857 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,859 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName a 2024-12-02T14:17:38,859 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,859 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,859 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,860 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName b 2024-12-02T14:17:38,861 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,861 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,861 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,862 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName c 2024-12-02T14:17:38,862 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,863 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,863 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,863 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,864 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,865 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,865 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,866 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:38,866 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,868 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:38,869 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 10ff7749bd5b2c4d371ecd0333610d52; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59822614, jitterRate=-0.10857358574867249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:38,869 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 10ff7749bd5b2c4d371ecd0333610d52: Writing region info on filesystem at 1733149058856Initializing all the Stores at 1733149058857 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058857Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058857Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058857Cleaning up temporary data from old regions at 1733149058865 (+8 ms)Region opened successfully at 1733149058869 (+4 ms) 2024-12-02T14:17:38,869 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 10ff7749bd5b2c4d371ecd0333610d52, disabling compactions & flushes 2024-12-02T14:17:38,869 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:38,869 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:38,869 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 
after waiting 0 ms 2024-12-02T14:17:38,869 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:38,870 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:38,870 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 10ff7749bd5b2c4d371ecd0333610d52: Waiting for close lock at 1733149058869Disabling compacts and flushes for region at 1733149058869Disabling writes for close at 1733149058869Writing region close event to WAL at 1733149058870 (+1 ms)Closed at 1733149058870 2024-12-02T14:17:38,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741915_1093 (size=95) 2024-12-02T14:17:38,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741915_1093 (size=95) 2024-12-02T14:17:38,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741915_1093 (size=95) 2024-12-02T14:17:38,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:38,874 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-22880848:(num 1733149058786) 2024-12-02T14:17:38,874 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:38,876 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:38,889 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, exclude list is [], retry=0 2024-12-02T14:17:38,891 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:38,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:38,892 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:38,894 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 2024-12-02T14:17:38,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:38,894 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 10ff7749bd5b2c4d371ecd0333610d52, NAME => 'testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:38,894 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:38,894 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,894 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,895 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,896 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName a 2024-12-02T14:17:38,896 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,897 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,897 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,897 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName b 2024-12-02T14:17:38,897 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,898 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,898 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,898 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName c 2024-12-02T14:17:38,898 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:38,899 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:38,899 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,900 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,900 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,901 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,901 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,902 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
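The "(42.7 M)" figure in the FlushLargeStoresPolicy entry just above is the default memstore flush size split evenly across the table's three column families (a, b and c): hbase.hregion.memstore.flush.size defaults to 134217728 bytes, and the integer division matches the flushSizeLowerBound=44739242 reported when the region opens.

    134217728 bytes / 3 families = 44739242 bytes ≈ 42.7 MB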
2024-12-02T14:17:38,903 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:38,904 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 10ff7749bd5b2c4d371ecd0333610d52; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70093869, jitterRate=0.04448004066944122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:38,904 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 10ff7749bd5b2c4d371ecd0333610d52: Writing region info on filesystem at 1733149058894Initializing all the Stores at 1733149058895 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058895Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058895Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149058895Cleaning up temporary data from old regions at 1733149058901 (+6 ms)Region opened successfully at 1733149058904 (+3 ms) 2024-12-02T14:17:38,908 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733149058907/Put/seqid=0 2024-12-02T14:17:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741918_1096 (size=4875) 2024-12-02T14:17:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741918_1096 (size=4875) 2024-12-02T14:17:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741918_1096 (size=4875) 2024-12-02T14:17:38,915 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733149058915/Put/seqid=0 2024-12-02T14:17:38,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741919_1097 (size=4875) 2024-12-02T14:17:38,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741919_1097 (size=4875) 2024-12-02T14:17:38,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741919_1097 (size=4875) 2024-12-02T14:17:38,923 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733149058923/Put/seqid=0 2024-12-02T14:17:38,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741920_1098 (size=4875) 2024-12-02T14:17:38,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741920_1098 (size=4875) 2024-12-02T14:17:38,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741920_1098 (size=4875) 2024-12-02T14:17:38,929 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,933 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-02T14:17:38,933 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T14:17:38,933 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,936 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-02T14:17:38,936 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T14:17:38,936 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,939 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-02T14:17:38,939 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T14:17:38,939 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 10ff7749bd5b2c4d371ecd0333610d52 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-02T14:17:38,955 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp/a/c35722d1022d464cad6f6ecee9cb6e98 is 55, key is testCompactedBulkLoadedFiles/a:a/1733149058904/Put/seqid=0 2024-12-02T14:17:38,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741921_1099 (size=5107) 2024-12-02T14:17:38,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741921_1099 (size=5107) 2024-12-02T14:17:38,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741921_1099 (size=5107) 2024-12-02T14:17:38,963 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp/a/c35722d1022d464cad6f6ecee9cb6e98 2024-12-02T14:17:38,970 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp/a/c35722d1022d464cad6f6ecee9cb6e98 as 
hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98 2024-12-02T14:17:38,974 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98, entries=1, sequenceid=4, filesize=5.0 K 2024-12-02T14:17:38,975 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 10ff7749bd5b2c4d371ecd0333610d52 in 36ms, sequenceid=4, compaction requested=false 2024-12-02T14:17:38,975 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 10ff7749bd5b2c4d371ecd0333610d52: 2024-12-02T14:17:38,976 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ 2024-12-02T14:17:38,978 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ 2024-12-02T14:17:38,978 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ 2024-12-02T14:17:38,979 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile0 into 10ff7749bd5b2c4d371ecd0333610d52/a as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ - updating store file list. 2024-12-02T14:17:38,983 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for cb784df272024f4ea4f0d18e53964cde_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:38,983 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ into 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,983 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile0 into 10ff7749bd5b2c4d371ecd0333610d52/a (new location: hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_) 2024-12-02T14:17:38,983 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile1 into 10ff7749bd5b2c4d371ecd0333610d52/a as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ - updating store file list. 
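The "Loaded HFile ... - updating store file list" entries around this point (hfile0 committed above, hfile1 and hfile2 just below) come from the test driving HRegion's internal bulk-load path directly. Outside a test, the same kind of load is normally issued through the client-side BulkLoadHFiles tool of recent HBase releases. A minimal sketch, assuming the HFiles have already been written under family-named subdirectories of a staging directory; the path and table name below are illustrative placeholders, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Staging directory laid out as <dir>/<family>/<hfile>, e.g. .../a/hfile0 (hypothetical path).
        Path stagingDir = new Path("/tmp/bulkload-staging");
        // Hands the HFiles over to the region servers hosting the table; each loaded file appears in
        // the store under a _SeqId_N_-suffixed name, as in the entries above.
        BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("testCompactedBulkLoadedFiles"), stagingDir);
      }
    }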
2024-12-02T14:17:38,987 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:38,987 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ into 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,987 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile1 into 10ff7749bd5b2c4d371ecd0333610d52/a (new location: hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_) 2024-12-02T14:17:38,988 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile2 into 10ff7749bd5b2c4d371ecd0333610d52/a as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ - updating store file list. 2024-12-02T14:17:38,992 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:38,992 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ into 10ff7749bd5b2c4d371ecd0333610d52/a 2024-12-02T14:17:38,992 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42525/hbase/testCompactedBulkLoadedFiles/hfile2 into 10ff7749bd5b2c4d371ecd0333610d52/a (new location: hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_) 2024-12-02T14:17:38,999 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T14:17:38,999 DEBUG [Time-limited test {}] regionserver.HStore(1541): 10ff7749bd5b2c4d371ecd0333610d52/a is initiating major compaction (all files) 2024-12-02T14:17:38,999 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 10ff7749bd5b2c4d371ecd0333610d52/a in testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 
2024-12-02T14:17:38,999 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_] into tmpdir=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp, totalSize=19.3 K 2024-12-02T14:17:39,000 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c35722d1022d464cad6f6ecee9cb6e98, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733149058904 2024-12-02T14:17:39,000 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cb784df272024f4ea4f0d18e53964cde_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-02T14:17:39,001 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-02T14:17:39,001 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-02T14:17:39,012 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp/a/dd0415d00f2b47c4ab98650304708368 is 55, key is testCompactedBulkLoadedFiles/a:a/1733149058904/Put/seqid=0 2024-12-02T14:17:39,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741922_1100 (size=6154) 2024-12-02T14:17:39,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741922_1100 (size=6154) 2024-12-02T14:17:39,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741922_1100 (size=6154) 2024-12-02T14:17:39,025 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/.tmp/a/dd0415d00f2b47c4ab98650304708368 as hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/dd0415d00f2b47c4ab98650304708368 2024-12-02T14:17:39,031 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 10ff7749bd5b2c4d371ecd0333610d52/a of 10ff7749bd5b2c4d371ecd0333610d52 into dd0415d00f2b47c4ab98650304708368(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
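The major compaction traced in the entries above is driven directly on the standalone HRegion by the test thread; against a running cluster the equivalent request goes through the Admin API, after which the hosting region server emits the same kind of "Starting compaction" / "Completed major compaction" entries. A minimal sketch (connection settings and table name are illustrative, not taken from this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Schedules a major compaction of every store in every region of the table; the request
          // is asynchronous, so completion is only visible in the region server log and metrics.
          admin.majorCompact(TableName.valueOf("testCompactedBulkLoadedFiles"));
        }
      }
    }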
2024-12-02T14:17:39,032 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 10ff7749bd5b2c4d371ecd0333610d52: 2024-12-02T14:17:39,032 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-02T14:17:39,032 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-02T14:17:39,071 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, size=0 (0bytes) 2024-12-02T14:17:39,071 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 might be still open, length is 0 2024-12-02T14:17:39,071 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 2024-12-02T14:17:39,072 WARN [IPC Server handler 3 on default port 42525 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095 2024-12-02T14:17:39,073 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 after 2ms 2024-12-02T14:17:39,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-02T14:17:39,237 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:39,238 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-02T14:17:39,238 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-02T14:17:40,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:47820 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:39417:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47820 dst: /127.0.0.1:39417 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39417 remote=/127.0.0.1:47820]. Total timeout mills is 60000, 58331 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:40,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33598 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33598 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:17:40,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33720 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:44963:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33720 dst: /127.0.0.1:44963 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:40,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741917_1101 (size=1173) 2024-12-02T14:17:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741917_1101 (size=1173) 2024-12-02T14:17:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741917_1101 (size=1173) 2024-12-02T14:17:43,073 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 after 4002ms 2024-12-02T14:17:43,076 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:43,076 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 took 4005ms 2024-12-02T14:17:43,078 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876; continuing. 
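The compression context initialized just above (hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ) reflects the WAL compression settings this suite (TestAsyncWALReplayValueCompression) runs with. A configuration sketch: hbase.regionserver.wal.enablecompression is the long-standing WAL compression switch, while the two value-compression keys are assumed names for the newer WAL value compression feature and should be verified against the HBase release in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalCompressionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enables the dictionary-based WAL compression context.
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Assumed key names for WAL value compression -- verify against your HBase version.
        conf.setBoolean("hbase.regionserver.wal.value.compression", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
      }
    }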
2024-12-02T14:17:43,078 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 so closing down 2024-12-02T14:17:43,078 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:43,080 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733149058876.temp 2024-12-02T14:17:43,081 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp 2024-12-02T14:17:43,081 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741923_1102 (size=548) 2024-12-02T14:17:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741923_1102 (size=548) 2024-12-02T14:17:43,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741923_1102 (size=548) 2024-12-02T14:17:43,095 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:43,096 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp to hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 2024-12-02T14:17:43,097 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 19 ms; skipped=3; WAL=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, size=0, length=0, corrupted=false, cancelled=false 2024-12-02T14:17:43,097 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, journal: Splitting hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, size=0 (0bytes) at 1733149059071Finishing writing output for hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 so closing down at 1733149063078 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp at 1733149063081 (+3 ms)3 split writer threads finished at 1733149063081Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp (wrote 2 edits, skipped 0 
edits in 0 ms) at 1733149063095 (+14 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000003-wal.1733149058876.temp to hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 at 1733149063096 (+1 ms)Processed 5 edits across 1 Regions in 19 ms; skipped=3; WAL=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876, size=0, length=0, corrupted=false, cancelled=false at 1733149063097 (+1 ms) 2024-12-02T14:17:43,098 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149058876 2024-12-02T14:17:43,099 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 2024-12-02T14:17:43,099 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:43,101 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:43,114 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149063101, exclude list is [], retry=0 2024-12-02T14:17:43,116 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:43,117 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:43,117 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:43,118 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149063101 2024-12-02T14:17:43,119 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:43,119 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 10ff7749bd5b2c4d371ecd0333610d52, NAME => 'testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:43,119 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:43,119 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,119 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,120 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,121 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName a 2024-12-02T14:17:43,121 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,128 DEBUG [StoreFileOpener-10ff7749bd5b2c4d371ecd0333610d52-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:43,128 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ 2024-12-02T14:17:43,131 DEBUG [StoreFileOpener-10ff7749bd5b2c4d371ecd0333610d52-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:43,131 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ 2024-12-02T14:17:43,134 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98 2024-12-02T14:17:43,137 DEBUG [StoreFileOpener-10ff7749bd5b2c4d371ecd0333610d52-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for cb784df272024f4ea4f0d18e53964cde_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T14:17:43,137 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ 2024-12-02T14:17:43,142 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/dd0415d00f2b47c4ab98650304708368 2024-12-02T14:17:43,142 WARN [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@593f9b13 2024-12-02T14:17:43,142 WARN [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@593f9b13 2024-12-02T14:17:43,142 WARN [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@593f9b13 2024-12-02T14:17:43,142 WARN [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@593f9b13 2024-12-02T14:17:43,142 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_] to archive 2024-12-02T14:17:43,143 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
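The entries around this point show the store opener for column family 'a' loading five store files, clearing the four that were compacted away, and handing them to HFileArchiver, which moves each file from the region's data directory to the matching location under /hbase/archive (only the compaction output dd0415d00f2b47c4ab98650304708368 stays live). A minimal, self-contained sketch of that data-to-archive path mapping, assuming only the directory layout visible in the log; the class and helper names here are illustrative, not HBase's HFileArchiver API:

    // Illustrative helper, not HBase's HFileArchiver API: maps a store file under
    // <root>/data/<namespace>/<table>/<region>/<family>/<file> to the matching path
    // under <root>/archive/data/..., mirroring the "Archived from FileableStoreFile
    // ... to ..." pairs logged by HFileArchiver just below.
    public final class ArchivePathSketch {
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
            }
            // Re-root the namespace/table/region/family/file suffix under archive/data.
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:42525/hbase";
            String src = root + "/data/default/testCompactedBulkLoadedFiles/"
                + "10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98";
            // Prints the same archive destination as the HFileArchiver entry for this file.
            System.out.println(toArchivePath(root, src));
        }
    }
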
2024-12-02T14:17:43,144 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ to hdfs://localhost:42525/hbase/archive/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_ 2024-12-02T14:17:43,145 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ to hdfs://localhost:42525/hbase/archive/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_ 2024-12-02T14:17:43,146 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98 to hdfs://localhost:42525/hbase/archive/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/c35722d1022d464cad6f6ecee9cb6e98 2024-12-02T14:17:43,147 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ to hdfs://localhost:42525/hbase/archive/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/a/cb784df272024f4ea4f0d18e53964cde_SeqId_4_ 2024-12-02T14:17:43,147 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,147 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,148 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName b 2024-12-02T14:17:43,148 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,149 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] 
regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,149 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,149 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10ff7749bd5b2c4d371ecd0333610d52 columnFamilyName c 2024-12-02T14:17:43,149 DEBUG [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,150 INFO [StoreOpener-10ff7749bd5b2c4d371ecd0333610d52-1 {}] regionserver.HStore(327): Store=10ff7749bd5b2c4d371ecd0333610d52/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,150 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,151 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,152 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,152 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 2024-12-02T14:17:43,154 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:43,156 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 10ff7749bd5b2c4d371ecd0333610d52 : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "10ff7749bd5b2c4d371ecd0333610d52" family_name: "a" compaction_input: "c35722d1022d464cad6f6ecee9cb6e98" compaction_input: "cb784df272024f4ea4f0d18e53964cde_SeqId_4_" compaction_input: "5243750fb2d841ccbef30e2c5f6f5bf4_SeqId_4_" compaction_input: 
"90184fdbea424aa780f7adf2d0a46a4d_SeqId_4_" compaction_output: "dd0415d00f2b47c4ab98650304708368" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-02T14:17:43,156 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-02T14:17:43,156 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 2024-12-02T14:17:43,156 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/0000000000000000008 2024-12-02T14:17:43,157 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,157 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,158 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:43,159 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 10ff7749bd5b2c4d371ecd0333610d52 2024-12-02T14:17:43,164 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testCompactedBulkLoadedFiles/10ff7749bd5b2c4d371ecd0333610d52/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T14:17:43,165 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 10ff7749bd5b2c4d371ecd0333610d52; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63958757, jitterRate=-0.04694025218486786}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:43,166 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 10ff7749bd5b2c4d371ecd0333610d52: Writing region info on filesystem at 1733149063119Initializing all the Stores at 1733149063120 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063120Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063120Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1733149063120Cleaning up temporary data from old regions at 1733149063157 (+37 ms)Region opened successfully at 1733149063166 (+9 ms) 2024-12-02T14:17:43,168 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 10ff7749bd5b2c4d371ecd0333610d52, disabling compactions & flushes 2024-12-02T14:17:43,168 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:43,168 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:43,168 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. after waiting 0 ms 2024-12-02T14:17:43,168 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:43,169 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733149058780.10ff7749bd5b2c4d371ecd0333610d52. 2024-12-02T14:17:43,169 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 10ff7749bd5b2c4d371ecd0333610d52: Waiting for close lock at 1733149063168Disabling compacts and flushes for region at 1733149063168Disabling writes for close at 1733149063168Writing region close event to WAL at 1733149063169 (+1 ms)Closed at 1733149063169 2024-12-02T14:17:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741924_1103 (size=95) 2024-12-02T14:17:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741924_1103 (size=95) 2024-12-02T14:17:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741924_1103 (size=95) 2024-12-02T14:17:43,174 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:43,174 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733149063101) 2024-12-02T14:17:43,192 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=434 (was 422) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1700279658_22 at /127.0.0.1:33774 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:42525 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42525 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1700279658_22 at /127.0.0.1:47906 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1334 (was 1252) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=278 (was 294), ProcessCount=11 (was 11), AvailableMemoryMB=4240 (was 4250) 2024-12-02T14:17:43,192 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1334 is superior to 1024 2024-12-02T14:17:43,210 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=434, OpenFileDescriptor=1334, MaxFileDescriptor=1048576, SystemLoadAverage=278, ProcessCount=11, AvailableMemoryMB=4236 2024-12-02T14:17:43,210 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1334 is superior to 1024 2024-12-02T14:17:43,232 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:43,235 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T14:17:43,235 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T14:17:43,238 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-97650862, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/hregion-97650862, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:43,258 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-97650862/hregion-97650862.1733149063239, exclude list is [], retry=0 2024-12-02T14:17:43,261 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:43,262 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:43,262 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:43,264 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-97650862/hregion-97650862.1733149063239 2024-12-02T14:17:43,264 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239)] 2024-12-02T14:17:43,264 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 4c1fe96f5952019ec9864465b4408dac, NAME => 'testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42525/hbase 2024-12-02T14:17:43,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741926_1105 (size=67) 2024-12-02T14:17:43,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741926_1105 (size=67) 2024-12-02T14:17:43,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741926_1105 (size=67) 2024-12-02T14:17:43,281 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:43,283 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,284 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName a 2024-12-02T14:17:43,284 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,285 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,285 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,286 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName b 2024-12-02T14:17:43,286 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,287 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,287 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,288 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName c 2024-12-02T14:17:43,288 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,289 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,289 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,290 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,290 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,291 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,291 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,292 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:43,293 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,295 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:17:43,296 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4c1fe96f5952019ec9864465b4408dac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59127907, jitterRate=-0.11892552673816681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:43,296 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4c1fe96f5952019ec9864465b4408dac: Writing region info on filesystem at 1733149063281Initializing all the Stores at 1733149063282 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063282Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063283 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063283Cleaning up temporary data from old regions at 1733149063291 (+8 ms)Region opened successfully at 1733149063296 (+5 ms) 2024-12-02T14:17:43,296 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4c1fe96f5952019ec9864465b4408dac, disabling compactions & flushes 2024-12-02T14:17:43,296 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,296 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,296 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 
after waiting 0 ms 2024-12-02T14:17:43,296 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,297 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,297 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4c1fe96f5952019ec9864465b4408dac: Waiting for close lock at 1733149063296Disabling compacts and flushes for region at 1733149063296Disabling writes for close at 1733149063296Writing region close event to WAL at 1733149063297 (+1 ms)Closed at 1733149063297 2024-12-02T14:17:43,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741925_1104 (size=95) 2024-12-02T14:17:43,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741925_1104 (size=95) 2024-12-02T14:17:43,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741925_1104 (size=95) 2024-12-02T14:17:43,302 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-02T14:17:43,302 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-97650862:(num 1733149063239) 2024-12-02T14:17:43,302 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:43,304 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:43,313 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T14:17:43,325 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, exclude list is [], retry=0 2024-12-02T14:17:43,328 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:43,329 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:43,329 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:43,333 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 2024-12-02T14:17:43,337 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:43,337 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c1fe96f5952019ec9864465b4408dac, NAME => 'testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:43,337 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:43,337 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,338 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,341 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,342 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName a 2024-12-02T14:17:43,342 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,343 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,343 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,343 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName b 2024-12-02T14:17:43,343 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,344 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,344 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,345 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName c 2024-12-02T14:17:43,345 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,345 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,345 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,346 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,347 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,348 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,348 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,349 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:43,350 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,351 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4c1fe96f5952019ec9864465b4408dac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61752274, jitterRate=-0.07981941103935242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:43,351 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4c1fe96f5952019ec9864465b4408dac: Writing region info on filesystem at 1733149063338Initializing all the Stores at 1733149063339 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063339Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063341 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063341Cleaning up temporary data from old regions at 1733149063348 (+7 ms)Region opened successfully at 1733149063351 (+3 ms) 2024-12-02T14:17:43,362 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4c1fe96f5952019ec9864465b4408dac 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-02T14:17:43,378 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/0d0d4fa9f9604e408c36a505f383b1ad is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733149063351/Put/seqid=0 2024-12-02T14:17:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741928_1107 (size=5958) 2024-12-02T14:17:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741928_1107 (size=5958) 2024-12-02T14:17:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741928_1107 (size=5958) 2024-12-02T14:17:43,386 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/0d0d4fa9f9604e408c36a505f383b1ad 2024-12-02T14:17:43,391 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/0d0d4fa9f9604e408c36a505f383b1ad as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/0d0d4fa9f9604e408c36a505f383b1ad 2024-12-02T14:17:43,396 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/0d0d4fa9f9604e408c36a505f383b1ad, entries=10, sequenceid=13, filesize=5.8 K 2024-12-02T14:17:43,398 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 4c1fe96f5952019ec9864465b4408dac in 37ms, sequenceid=13, compaction requested=false 2024-12-02T14:17:43,398 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4c1fe96f5952019ec9864465b4408dac: 2024-12-02T14:17:43,421 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4c1fe96f5952019ec9864465b4408dac, disabling compactions & flushes 2024-12-02T14:17:43,421 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,421 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,421 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. after waiting 0 ms 2024-12-02T14:17:43,421 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,422 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:43,422 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 
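Both WAL splits in this log (wal.1733149058876 earlier and wal.1733149063305 just below) write a region's edits to a temporary recovered-edits file named after the first sequence id plus the source WAL name, then rename it to the highest sequence id once the writer is closed (0000000000000000003-wal.1733149063305.temp becoming 0000000000000000035 below). A small, self-contained sketch of that naming scheme, assuming the 19-digit zero padding shown in the paths; this is illustrative only, not the actual WALSplitter/AbstractRecoveredEditsOutputSink code:

    // Illustrative sketch only, not the actual HBase split code: reproduces the
    // recovered-edits naming visible in this log, where the split writer is created as
    // <regionDir>/recovered.edits/<zero-padded first seqid>-<wal name>.temp and, once
    // closed, renamed to the highest sequence id it holds.
    public final class RecoveredEditsNamingSketch {
        // 19-digit zero padding, matching "0000000000000000003" / "0000000000000000035".
        static String formatSeqId(long seqId) {
            return String.format("%019d", seqId);
        }

        static String tempEditsFile(String regionDir, long firstSeqId, String walName) {
            return regionDir + "/recovered.edits/" + formatSeqId(firstSeqId) + "-" + walName + ".temp";
        }

        static String finalEditsFile(String regionDir, long maxSeqId) {
            return regionDir + "/recovered.edits/" + formatSeqId(maxSeqId);
        }

        public static void main(String[] args) {
            String regionDir = "hdfs://localhost:42525/hbase/data/default/"
                + "testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac";
            // Same temp and final names as the split of wal.1733149063305 below.
            System.out.println(tempEditsFile(regionDir, 3L, "wal.1733149063305"));
            System.out.println(finalEditsFile(regionDir, 35L));
        }
    }
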
2024-12-02T14:17:43,422 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4c1fe96f5952019ec9864465b4408dac: Waiting for close lock at 1733149063421Disabling compacts and flushes for region at 1733149063421Disabling writes for close at 1733149063421Writing region close event to WAL at 1733149063422 (+1 ms)Closed at 1733149063422 2024-12-02T14:17:43,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741927_1106 (size=3346) 2024-12-02T14:17:43,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741927_1106 (size=3346) 2024-12-02T14:17:43,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741927_1106 (size=3346) 2024-12-02T14:17:43,449 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, size=3.3 K (3346bytes) 2024-12-02T14:17:43,449 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 2024-12-02T14:17:43,449 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 after 0ms 2024-12-02T14:17:43,451 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:43,451 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 took 3ms 2024-12-02T14:17:43,453 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 so closing down 2024-12-02T14:17:43,453 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:43,455 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733149063305.temp 2024-12-02T14:17:43,456 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp 2024-12-02T14:17:43,457 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741929_1108 (size=2944) 2024-12-02T14:17:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741929_1108 (size=2944) 2024-12-02T14:17:43,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741929_1108 
(size=2944) 2024-12-02T14:17:43,463 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:43,465 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 2024-12-02T14:17:43,465 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-12-02T14:17:43,465 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, journal: Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, size=3.3 K (3346bytes) at 1733149063449Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 so closing down at 1733149063453 (+4 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp at 1733149063456 (+3 ms)3 split writer threads finished at 1733149063457 (+1 ms)Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733149063463 (+6 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000003-wal.1733149063305.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 at 1733149063465 (+2 ms)Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1733149063465 2024-12-02T14:17:43,467 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063305 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149063305 2024-12-02T14:17:43,468 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 2024-12-02T14:17:43,468 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:43,469 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:43,482 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, exclude list is [], retry=0 2024-12-02T14:17:43,485 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:43,485 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:43,485 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:43,488 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 2024-12-02T14:17:43,488 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241),(127.0.0.1/127.0.0.1:43117:43117)] 2024-12-02T14:17:43,488 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c1fe96f5952019ec9864465b4408dac, NAME => 'testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:17:43,488 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:43,489 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,489 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,490 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,491 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName a 2024-12-02T14:17:43,491 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,498 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/0d0d4fa9f9604e408c36a505f383b1ad 2024-12-02T14:17:43,498 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,499 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,499 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName b 2024-12-02T14:17:43,499 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,500 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,500 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,500 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName c 2024-12-02T14:17:43,500 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:43,501 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:43,501 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,501 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,502 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,503 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 2024-12-02T14:17:43,504 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:43,505 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 2024-12-02T14:17:43,505 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4c1fe96f5952019ec9864465b4408dac 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-02T14:17:43,520 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/7c5f8bf504ad47879ed944c8a26d6986 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733149063398/Put/seqid=0 2024-12-02T14:17:43,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741931_1110 (size=5958) 2024-12-02T14:17:43,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741931_1110 (size=5958) 2024-12-02T14:17:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741931_1110 (size=5958) 2024-12-02T14:17:43,526 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/7c5f8bf504ad47879ed944c8a26d6986 2024-12-02T14:17:43,547 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/e71e5ee92823420cbb06c4a11686ecae is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733149063406/Put/seqid=0 2024-12-02T14:17:43,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741932_1111 (size=5958) 2024-12-02T14:17:43,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741932_1111 (size=5958) 2024-12-02T14:17:43,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741932_1111 (size=5958) 2024-12-02T14:17:43,555 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/e71e5ee92823420cbb06c4a11686ecae 2024-12-02T14:17:43,560 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/7c5f8bf504ad47879ed944c8a26d6986 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/b/7c5f8bf504ad47879ed944c8a26d6986 2024-12-02T14:17:43,564 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/b/7c5f8bf504ad47879ed944c8a26d6986, entries=10, sequenceid=35, filesize=5.8 K 2024-12-02T14:17:43,565 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/e71e5ee92823420cbb06c4a11686ecae as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/c/e71e5ee92823420cbb06c4a11686ecae 2024-12-02T14:17:43,569 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/c/e71e5ee92823420cbb06c4a11686ecae, entries=10, sequenceid=35, filesize=5.8 K 2024-12-02T14:17:43,570 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 4c1fe96f5952019ec9864465b4408dac in 64ms, sequenceid=35, compaction requested=false; wal=null 2024-12-02T14:17:43,570 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000035 2024-12-02T14:17:43,571 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,571 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,571 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:43,573 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:43,574 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-02T14:17:43,575 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4c1fe96f5952019ec9864465b4408dac; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60449950, jitterRate=-0.09922555088996887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:43,575 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4c1fe96f5952019ec9864465b4408dac: Writing region info on filesystem at 1733149063489Initializing all the Stores at 1733149063490 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063490Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063490Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149063490Obtaining lock to block concurrent updates at 1733149063505 (+15 ms)Preparing flush snapshotting stores in 4c1fe96f5952019ec9864465b4408dac at 1733149063505Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733149063506 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 
at 1733149063506Flushing 4c1fe96f5952019ec9864465b4408dac/b: creating writer at 1733149063506Flushing 4c1fe96f5952019ec9864465b4408dac/b: appending metadata at 1733149063519 (+13 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/b: closing flushed file at 1733149063519Flushing 4c1fe96f5952019ec9864465b4408dac/c: creating writer at 1733149063531 (+12 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/c: appending metadata at 1733149063547 (+16 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/c: closing flushed file at 1733149063547Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2356eda6: reopening flushed file at 1733149063559 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75667ba4: reopening flushed file at 1733149063564 (+5 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 4c1fe96f5952019ec9864465b4408dac in 64ms, sequenceid=35, compaction requested=false; wal=null at 1733149063570 (+6 ms)Cleaning up temporary data from old regions at 1733149063571 (+1 ms)Region opened successfully at 1733149063575 (+4 ms) 2024-12-02T14:17:43,638 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, size=0 (0bytes) 2024-12-02T14:17:43,638 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 might be still open, length is 0 2024-12-02T14:17:43,638 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 2024-12-02T14:17:43,638 WARN [IPC Server handler 2 on default port 42525 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-12-02T14:17:43,639 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 after 1ms 2024-12-02T14:17:46,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:47952 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:39417:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47952 dst: /127.0.0.1:39417 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39417 remote=/127.0.0.1:47952]. Total timeout mills is 60000, 57144 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:46,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33842 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:44963:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33842 dst: /127.0.0.1:44963 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:46,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2060961908_22 at /127.0.0.1:33700 [Receiving block BP-1615503913-172.17.0.3-1733149025365:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33700 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:46,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741930_1112 (size=2936) 2024-12-02T14:17:46,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741930_1112 (size=2936) 2024-12-02T14:17:46,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741930_1112 (size=2936) 2024-12-02T14:17:47,639 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 after 4001ms 2024-12-02T14:17:47,642 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:47,642 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 took 4004ms 2024-12-02T14:17:47,644 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470; continuing. 
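The warning that wal.1733149063470 "might be still open, length is 0" is resolved by HDFS lease recovery: attempt=0 fails, the datanode write pipelines are interrupted (the DataXceiver errors above), and attempt=1 succeeds about four seconds later, after which the splitter opens the file and reads it to EOF. A minimal sketch of such a recovery loop against the public DistributedFileSystem API (not the internal RecoverLeaseFSUtils helper used here) could look like this, with the retry count and back-off chosen purely for illustration:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // WAL path and NameNode address taken from the log above.
    Path wal = new Path("hdfs://localhost:42525/hbase/WALs/"
        + "testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470");
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:42525"), conf);
    // recoverLease() returns true once the NameNode has closed the file; until then a
    // reader sees length 0, exactly like the "might be still open" warning.
    boolean closed = dfs.recoverLease(wal);
    for (int attempt = 1; !closed && attempt <= 10; attempt++) {
      Thread.sleep(4000L);  // back off between attempts, mirroring the ~4001 ms gap in the log
      closed = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
      System.out.println("lease recovery attempt=" + attempt + " closed=" + closed);
    }
  }
}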
2024-12-02T14:17:47,644 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 so closing down 2024-12-02T14:17:47,644 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-02T14:17:47,645 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733149063470.temp 2024-12-02T14:17:47,647 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp 2024-12-02T14:17:47,647 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-02T14:17:47,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741933_1113 (size=2944) 2024-12-02T14:17:47,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741933_1113 (size=2944) 2024-12-02T14:17:47,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741933_1113 (size=2944) 2024-12-02T14:17:47,654 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-02T14:17:47,655 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 2024-12-02T14:17:47,655 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, size=0, length=0, corrupted=false, cancelled=false 2024-12-02T14:17:47,655 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, journal: Splitting hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, size=0 (0bytes) at 1733149063638Finishing writing output for hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 so closing down at 1733149067644 (+4006 ms)Creating recovered edits writer path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp at 1733149067647 (+3 ms)3 split writer threads finished at 1733149067647Closed recovered edits writer 
path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733149067654 (+7 ms)Rename recovered edits hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000037-wal.1733149063470.temp to hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 at 1733149067655 (+1 ms)Processed 30 edits across 1 Regions in 13 ms; skipped=0; WAL=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470, size=0, length=0, corrupted=false, cancelled=false at 1733149067655 2024-12-02T14:17:47,657 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 to hdfs://localhost:42525/hbase/oldWALs/wal.1733149063470 2024-12-02T14:17:47,658 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 2024-12-02T14:17:47,658 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-02T14:17:47,660 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:42525/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231, archiveDir=hdfs://localhost:42525/hbase/oldWALs, maxLogs=32 2024-12-02T14:17:47,673 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149067660, exclude list is [], retry=0 2024-12-02T14:17:47,676 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45771,DS-077d37d8-abd7-40e8-87b1-2528b05f4c15,DISK] 2024-12-02T14:17:47,676 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39417,DS-9971975c-9085-4344-bd27-b0115609ff6d,DISK] 2024-12-02T14:17:47,676 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44963,DS-1184478a-879d-4379-ada9-9b95037f8ac0,DISK] 2024-12-02T14:17:47,678 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149067660 2024-12-02T14:17:47,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:36239:36239),(127.0.0.1/127.0.0.1:39241:39241)] 2024-12-02T14:17:47,678 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:17:47,679 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,680 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName a 2024-12-02T14:17:47,680 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:47,685 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/0d0d4fa9f9604e408c36a505f383b1ad 2024-12-02T14:17:47,685 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:47,685 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,685 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName b 2024-12-02T14:17:47,686 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:47,691 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/b/7c5f8bf504ad47879ed944c8a26d6986 2024-12-02T14:17:47,691 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:47,691 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,692 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1fe96f5952019ec9864465b4408dac columnFamilyName c 2024-12-02T14:17:47,692 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:17:47,697 DEBUG [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/c/e71e5ee92823420cbb06c4a11686ecae 2024-12-02T14:17:47,697 INFO [StoreOpener-4c1fe96f5952019ec9864465b4408dac-1 {}] regionserver.HStore(327): Store=4c1fe96f5952019ec9864465b4408dac/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:17:47,697 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,698 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,699 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:47,700 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 2024-12-02T14:17:47,702 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-12-02T14:17:47,707 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 2024-12-02T14:17:47,707 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4c1fe96f5952019ec9864465b4408dac 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-02T14:17:47,722 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/ed7f3ea381e94b1d84654d96b747f333 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733149063582/Put/seqid=0 2024-12-02T14:17:47,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741935_1115 (size=5958) 2024-12-02T14:17:47,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741935_1115 (size=5958) 2024-12-02T14:17:47,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741935_1115 (size=5958) 2024-12-02T14:17:47,728 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/ed7f3ea381e94b1d84654d96b747f333 2024-12-02T14:17:47,746 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/0cb4f65fd3b64e3592bd6f662e70cab4 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733149063589/Put/seqid=0 2024-12-02T14:17:47,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741936_1116 (size=5958) 2024-12-02T14:17:47,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741936_1116 (size=5958) 2024-12-02T14:17:47,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741936_1116 (size=5958) 2024-12-02T14:17:48,153 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/0cb4f65fd3b64e3592bd6f662e70cab4 2024-12-02T14:17:48,174 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/68678784a43648b0a62aad60f7f753fc is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733149063595/Put/seqid=0 2024-12-02T14:17:48,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741937_1117 (size=5958) 2024-12-02T14:17:48,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741937_1117 
(size=5958) 2024-12-02T14:17:48,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741937_1117 (size=5958) 2024-12-02T14:17:48,183 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/68678784a43648b0a62aad60f7f753fc 2024-12-02T14:17:48,188 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/a/ed7f3ea381e94b1d84654d96b747f333 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/ed7f3ea381e94b1d84654d96b747f333 2024-12-02T14:17:48,192 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/a/ed7f3ea381e94b1d84654d96b747f333, entries=10, sequenceid=66, filesize=5.8 K 2024-12-02T14:17:48,192 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/b/0cb4f65fd3b64e3592bd6f662e70cab4 as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/b/0cb4f65fd3b64e3592bd6f662e70cab4 2024-12-02T14:17:48,197 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/b/0cb4f65fd3b64e3592bd6f662e70cab4, entries=10, sequenceid=66, filesize=5.8 K 2024-12-02T14:17:48,197 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/.tmp/c/68678784a43648b0a62aad60f7f753fc as hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/c/68678784a43648b0a62aad60f7f753fc 2024-12-02T14:17:48,201 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/c/68678784a43648b0a62aad60f7f753fc, entries=10, sequenceid=66, filesize=5.8 K 2024-12-02T14:17:48,201 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 4c1fe96f5952019ec9864465b4408dac in 494ms, sequenceid=66, compaction requested=false; wal=null 2024-12-02T14:17:48,202 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/0000000000000000066 2024-12-02T14:17:48,203 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:48,203 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:48,204 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-02T14:17:48,205 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 4c1fe96f5952019ec9864465b4408dac 2024-12-02T14:17:48,207 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/hbase/data/default/testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-02T14:17:48,208 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 4c1fe96f5952019ec9864465b4408dac; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63744225, jitterRate=-0.050137028098106384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-02T14:17:48,208 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 4c1fe96f5952019ec9864465b4408dac: Writing region info on filesystem at 1733149067678Initializing all the Stores at 1733149067679 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149067679Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149067679Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733149067679Obtaining lock to block concurrent updates at 1733149067707 (+28 ms)Preparing flush snapshotting stores in 4c1fe96f5952019ec9864465b4408dac at 1733149067707Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733149067707Flushing stores of testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 
at 1733149067707Flushing 4c1fe96f5952019ec9864465b4408dac/a: creating writer at 1733149067707Flushing 4c1fe96f5952019ec9864465b4408dac/a: appending metadata at 1733149067721 (+14 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/a: closing flushed file at 1733149067721Flushing 4c1fe96f5952019ec9864465b4408dac/b: creating writer at 1733149067732 (+11 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/b: appending metadata at 1733149067746 (+14 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/b: closing flushed file at 1733149067746Flushing 4c1fe96f5952019ec9864465b4408dac/c: creating writer at 1733149068159 (+413 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/c: appending metadata at 1733149068173 (+14 ms)Flushing 4c1fe96f5952019ec9864465b4408dac/c: closing flushed file at 1733149068173Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8529923: reopening flushed file at 1733149068187 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@100c9fdf: reopening flushed file at 1733149068192 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55869a1b: reopening flushed file at 1733149068197 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 4c1fe96f5952019ec9864465b4408dac in 494ms, sequenceid=66, compaction requested=false; wal=null at 1733149068201 (+4 ms)Cleaning up temporary data from old regions at 1733149068203 (+2 ms)Region opened successfully at 1733149068208 (+5 ms) 2024-12-02T14:17:48,221 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 4c1fe96f5952019ec9864465b4408dac, disabling compactions & flushes 2024-12-02T14:17:48,221 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:48,221 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:48,221 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. after waiting 0 ms 2024-12-02T14:17:48,221 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 2024-12-02T14:17:48,223 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733149063233.4c1fe96f5952019ec9864465b4408dac. 
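The second open above replays recovered.edits/0000000000000000066 (sequence ids 37 through 66), flushes the result to families a, b, and c, deletes the edits file, and records 66.seqid. The selection step, considering only recovered-edits files whose highest sequence id exceeds what the stores already contain, can be sketched with the plain Hadoop FileSystem API as below; the region path and the maxSeqId value are taken from the log, and the replay itself is reduced to a print statement since the real work happens inside HRegion:

import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region directory from the log above.
    Path regionDir = new Path("hdfs://localhost:42525/hbase/data/default/"
        + "testReplayEditsWrittenViaHRegion/4c1fe96f5952019ec9864465b4408dac");
    Path editsDir = new Path(regionDir, "recovered.edits");
    long maxSeqIdInStores = 35L;  // value recorded in 35.seqid after the first replay
    FileSystem fs = editsDir.getFileSystem(conf);
    if (!fs.exists(editsDir)) {
      System.out.println("no recovered edits to replay");
      return;
    }
    TreeMap<Long, Path> toReplay = new TreeMap<>();
    for (FileStatus st : fs.listStatus(editsDir)) {
      String name = st.getPath().getName();        // e.g. 0000000000000000066 or 66.seqid
      if (!name.matches("\\d+")) {
        continue;                                  // skip .temp files and *.seqid markers
      }
      long maxSeqIdInFile = Long.parseLong(name);  // file is renamed to its highest seq id
      if (maxSeqIdInFile > maxSeqIdInStores) {
        toReplay.put(maxSeqIdInFile, st.getPath()); // only edits newer than the stores matter
      }
    }
    // Replay in ascending sequence-id order, as in "Replaying edits from .../0000000000000000066".
    toReplay.forEach((seq, p) -> System.out.println("would replay " + p + " up to seqid " + seq));
  }
}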
2024-12-02T14:17:48,223 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 4c1fe96f5952019ec9864465b4408dac: Waiting for close lock at 1733149068221Disabling compacts and flushes for region at 1733149068221Disabling writes for close at 1733149068221Writing region close event to WAL at 1733149068223 (+2 ms)Closed at 1733149068223
2024-12-02T14:17:48,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741934_1114 (size=95)
2024-12-02T14:17:48,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741934_1114 (size=95)
2024-12-02T14:17:48,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741934_1114 (size=95)
2024-12-02T14:17:48,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-12-02T14:17:48,228 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733149067660)
2024-12-02T14:17:48,246 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=443 (was 434)

Potentially hanging thread: AsyncFSWAL-28-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:43585 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_795745491_22 at /127.0.0.1:53748 [Waiting for operation #18]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: AsyncFSWAL-28-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_795745491_22 at /127.0.0.1:44622 [Waiting for operation #20]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_795745491_22 at /127.0.0.1:38134 [Waiting for operation #5]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: AsyncFSWAL-28-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42525 from jenkinstestReplayEditsWrittenViaHRegion
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:42525
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43585
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=1404 (was 1334) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=264 (was 278), ProcessCount=11 (was 11), AvailableMemoryMB=4250 (was 4236) - AvailableMemoryMB LEAK? -
2024-12-02T14:17:48,246 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1404 is superior to 1024
2024-12-02T14:17:48,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-02T14:17:48,246 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-02T14:17:48,246 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153)
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:17:48,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:17:48,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:17:48,247 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-02T14:17:48,247 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:17:48,248 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1914149156, stopped=false 2024-12-02T14:17:48,248 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b4ac66777750,42687,1733149028802 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:48,250 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:17:48,251 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:17:48,251 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:17:48,251 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153)
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:17:48,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:17:48,251 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b4ac66777750,40955,1733149029496' *****
2024-12-02T14:17:48,251 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-02T14:17:48,251 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b4ac66777750,43009,1733149029645' *****
2024-12-02T14:17:48,251 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-02T14:17:48,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:17:48,252 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-02T14:17:48,252 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(3091): Received CLOSE for 0738ea0faaf2c5867685e891599fe105
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(959): stopping server b4ac66777750,43009,1733149029645
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(959): stopping server b4ac66777750,40955,1733149029496
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b4ac66777750:43009.
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-02T14:17:48,252 DEBUG [RS:2;b4ac66777750:43009 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:17:48,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:17:48,252 INFO [RS:0;b4ac66777750:40955 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b4ac66777750:40955.
2024-12-02T14:17:48,252 DEBUG [RS:2;b4ac66777750:43009 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:17:48,252 DEBUG [RS:0;b4ac66777750:40955 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:17:48,252 DEBUG [RS:0;b4ac66777750:40955 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:17:48,252 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0738ea0faaf2c5867685e891599fe105, disabling compactions & flushes
2024-12-02T14:17:48,252 INFO [RS:2;b4ac66777750:43009 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-02T14:17:48,253 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-02T14:17:48,253 INFO [RS:2;b4ac66777750:43009 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-02T14:17:48,253 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1325): Online Regions={0738ea0faaf2c5867685e891599fe105=testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.}
2024-12-02T14:17:48,253 DEBUG [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1351): Waiting on 0738ea0faaf2c5867685e891599fe105
2024-12-02T14:17:48,253 INFO [RS:2;b4ac66777750:43009 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-02T14:17:48,253 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.
2024-12-02T14:17:48,253 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105.
2024-12-02T14:17:48,253 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:17:48,253 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. after waiting 0 ms 2024-12-02T14:17:48,253 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:48,254 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T14:17:48,254 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:17:48,254 DEBUG [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T14:17:48,254 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:17:48,254 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:17:48,254 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:17:48,254 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:17:48,254 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:17:48,254 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-12-02T14:17:48,258 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/default/testReplayEditsAfterRegionMovedWithMultiCF/0738ea0faaf2c5867685e891599fe105/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-02T14:17:48,259 INFO [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 
2024-12-02T14:17:48,259 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0738ea0faaf2c5867685e891599fe105: Waiting for close lock at 1733149068252Running coprocessor pre-close hooks at 1733149068252Disabling compacts and flushes for region at 1733149068252Disabling writes for close at 1733149068253 (+1 ms)Writing region close event to WAL at 1733149068254 (+1 ms)Running coprocessor post-close hooks at 1733149068259 (+5 ms)Closed at 1733149068259 2024-12-02T14:17:48,259 DEBUG [RS_CLOSE_REGION-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105. 2024-12-02T14:17:48,261 INFO [regionserver/b4ac66777750:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:17:48,262 INFO [regionserver/b4ac66777750:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:17:48,276 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/info/6afef266da7f4bdda464c715d7abc54c is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733149044610.0738ea0faaf2c5867685e891599fe105./info:regioninfo/1733149047690/Put/seqid=0 2024-12-02T14:17:48,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741938_1118 (size=8243) 2024-12-02T14:17:48,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741938_1118 (size=8243) 2024-12-02T14:17:48,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741938_1118 (size=8243) 2024-12-02T14:17:48,285 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/info/6afef266da7f4bdda464c715d7abc54c 2024-12-02T14:17:48,306 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/ns/9990c7bd35d24a09947309326aa5a22a is 43, key is default/ns:d/1733149031993/Put/seqid=0 2024-12-02T14:17:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741939_1119 (size=5153) 2024-12-02T14:17:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741939_1119 (size=5153) 2024-12-02T14:17:48,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741939_1119 (size=5153) 2024-12-02T14:17:48,313 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/ns/9990c7bd35d24a09947309326aa5a22a 2024-12-02T14:17:48,333 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/table/d22bd9a7c34348168470e12bafe45eac is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733149045034/Put/seqid=0 2024-12-02T14:17:48,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741940_1120 (size=5431) 2024-12-02T14:17:48,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741940_1120 (size=5431) 2024-12-02T14:17:48,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741940_1120 (size=5431) 2024-12-02T14:17:48,340 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/table/d22bd9a7c34348168470e12bafe45eac 2024-12-02T14:17:48,345 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/info/6afef266da7f4bdda464c715d7abc54c as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/info/6afef266da7f4bdda464c715d7abc54c 2024-12-02T14:17:48,350 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/info/6afef266da7f4bdda464c715d7abc54c, entries=18, sequenceid=21, filesize=8.0 K 2024-12-02T14:17:48,351 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/ns/9990c7bd35d24a09947309326aa5a22a as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/ns/9990c7bd35d24a09947309326aa5a22a 2024-12-02T14:17:48,356 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/ns/9990c7bd35d24a09947309326aa5a22a, entries=2, sequenceid=21, filesize=5.0 K 2024-12-02T14:17:48,357 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/.tmp/table/d22bd9a7c34348168470e12bafe45eac as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/table/d22bd9a7c34348168470e12bafe45eac 2024-12-02T14:17:48,361 INFO 
[RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/table/d22bd9a7c34348168470e12bafe45eac, entries=2, sequenceid=21, filesize=5.3 K 2024-12-02T14:17:48,362 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=21, compaction requested=false 2024-12-02T14:17:48,367 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-02T14:17:48,367 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:17:48,367 INFO [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:17:48,367 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733149068254Running coprocessor pre-close hooks at 1733149068254Disabling compacts and flushes for region at 1733149068254Disabling writes for close at 1733149068254Obtaining lock to block concurrent updates at 1733149068254Preparing flush snapshotting stores in 1588230740 at 1733149068254Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1733149068254Flushing stores of hbase:meta,,1.1588230740 at 1733149068255 (+1 ms)Flushing 1588230740/info: creating writer at 1733149068255Flushing 1588230740/info: appending metadata at 1733149068276 (+21 ms)Flushing 1588230740/info: closing flushed file at 1733149068276Flushing 1588230740/ns: creating writer at 1733149068290 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733149068305 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733149068305Flushing 1588230740/table: creating writer at 1733149068318 (+13 ms)Flushing 1588230740/table: appending metadata at 1733149068333 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733149068333Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4226e82e: reopening flushed file at 1733149068345 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@266a1181: reopening flushed file at 1733149068351 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17fe2d35: reopening flushed file at 1733149068356 (+5 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=21, compaction requested=false at 1733149068362 (+6 ms)Writing region close event to WAL at 1733149068364 (+2 ms)Running coprocessor post-close hooks at 1733149068367 (+3 ms)Closed at 1733149068367 2024-12-02T14:17:48,368 DEBUG [RS_CLOSE_META-regionserver/b4ac66777750:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:17:48,453 INFO [RS:0;b4ac66777750:40955 {}] 
regionserver.HRegionServer(976): stopping server b4ac66777750,40955,1733149029496; all regions closed. 2024-12-02T14:17:48,454 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(976): stopping server b4ac66777750,43009,1733149029645; all regions closed. 2024-12-02T14:17:48,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741835_1011 (size=2179) 2024-12-02T14:17:48,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741835_1011 (size=2179) 2024-12-02T14:17:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741836_1012 (size=4694) 2024-12-02T14:17:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741835_1011 (size=2179) 2024-12-02T14:17:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741836_1012 (size=4694) 2024-12-02T14:17:48,459 DEBUG [RS:0;b4ac66777750:40955 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs 2024-12-02T14:17:48,459 INFO [RS:0;b4ac66777750:40955 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b4ac66777750%2C40955%2C1733149029496:(num 1733149031392) 2024-12-02T14:17:48,459 DEBUG [RS:0;b4ac66777750:40955 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:17:48,459 DEBUG [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs 2024-12-02T14:17:48,459 INFO [RS:0;b4ac66777750:40955 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:17:48,459 INFO [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b4ac66777750%2C43009%2C1733149029645.meta:.meta(num 1733149031798) 2024-12-02T14:17:48,459 INFO [RS:0;b4ac66777750:40955 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] hbase.ChoreService(370): Chore service for: regionserver/b4ac66777750:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:17:48,460 INFO [regionserver/b4ac66777750:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:17:48,460 INFO [RS:0;b4ac66777750:40955 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40955 2024-12-02T14:17:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741833_1009 (size=95) 2024-12-02T14:17:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741833_1009 (size=95) 2024-12-02T14:17:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741833_1009 (size=95) 2024-12-02T14:17:48,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b4ac66777750,40955,1733149029496 2024-12-02T14:17:48,463 INFO [RS:0;b4ac66777750:40955 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:17:48,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:17:48,465 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b4ac66777750,40955,1733149029496] 2024-12-02T14:17:48,465 DEBUG [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/oldWALs 2024-12-02T14:17:48,465 INFO [RS:2;b4ac66777750:43009 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b4ac66777750%2C43009%2C1733149029645:(num 1733149031391) 2024-12-02T14:17:48,465 DEBUG [RS:2;b4ac66777750:43009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:17:48,465 INFO [RS:2;b4ac66777750:43009 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:17:48,465 INFO [RS:2;b4ac66777750:43009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:17:48,466 INFO [RS:2;b4ac66777750:43009 {}] hbase.ChoreService(370): Chore service for: regionserver/b4ac66777750:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T14:17:48,466 INFO [RS:2;b4ac66777750:43009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:17:48,466 INFO [regionserver/b4ac66777750:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:17:48,466 INFO [RS:2;b4ac66777750:43009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43009 2024-12-02T14:17:48,467 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b4ac66777750,40955,1733149029496 already deleted, retry=false 2024-12-02T14:17:48,467 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b4ac66777750,40955,1733149029496 expired; onlineServers=1 2024-12-02T14:17:48,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b4ac66777750,43009,1733149029645 2024-12-02T14:17:48,468 INFO [RS:2;b4ac66777750:43009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:17:48,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:17:48,469 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b4ac66777750,43009,1733149029645] 2024-12-02T14:17:48,470 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b4ac66777750,43009,1733149029645 already deleted, retry=false 2024-12-02T14:17:48,470 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b4ac66777750,43009,1733149029645 expired; onlineServers=0 2024-12-02T14:17:48,470 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b4ac66777750,42687,1733149028802' ***** 2024-12-02T14:17:48,471 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:17:48,471 INFO [M:0;b4ac66777750:42687 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:17:48,471 INFO [M:0;b4ac66777750:42687 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:17:48,471 DEBUG [M:0;b4ac66777750:42687 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:17:48,471 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T14:17:48,471 DEBUG [M:0;b4ac66777750:42687 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:17:48,471 DEBUG [master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.large.0-1733149031024 {}] cleaner.HFileCleaner(306): Exit Thread[master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.large.0-1733149031024,5,FailOnTimeoutGroup] 2024-12-02T14:17:48,471 DEBUG [master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.small.0-1733149031026 {}] cleaner.HFileCleaner(306): Exit Thread[master/b4ac66777750:0:becomeActiveMaster-HFileCleaner.small.0-1733149031026,5,FailOnTimeoutGroup] 2024-12-02T14:17:48,471 INFO [M:0;b4ac66777750:42687 {}] hbase.ChoreService(370): Chore service for: master/b4ac66777750:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:17:48,471 INFO [M:0;b4ac66777750:42687 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:17:48,471 DEBUG [M:0;b4ac66777750:42687 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:17:48,471 INFO [M:0;b4ac66777750:42687 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:17:48,472 INFO [M:0;b4ac66777750:42687 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:17:48,472 INFO [M:0;b4ac66777750:42687 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:17:48,472 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:17:48,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:17:48,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:17:48,473 DEBUG [M:0;b4ac66777750:42687 {}] zookeeper.ZKUtil(347): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:17:48,473 WARN [M:0;b4ac66777750:42687 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:17:48,474 INFO [M:0;b4ac66777750:42687 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/.lastflushedseqids 2024-12-02T14:17:48,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741941_1121 (size=138) 2024-12-02T14:17:48,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741941_1121 (size=138) 2024-12-02T14:17:48,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741941_1121 (size=138) 2024-12-02T14:17:48,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:48,565 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:40955-0x1009b59793c0001, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:48,565 INFO [RS:0;b4ac66777750:40955 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:17:48,565 INFO [RS:0;b4ac66777750:40955 {}] regionserver.HRegionServer(1031): Exiting; stopping=b4ac66777750,40955,1733149029496; zookeeper connection closed. 2024-12-02T14:17:48,566 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d118bb4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d118bb4 2024-12-02T14:17:48,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:48,569 INFO [RS:2;b4ac66777750:43009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:17:48,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43009-0x1009b59793c0003, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:48,569 INFO [RS:2;b4ac66777750:43009 {}] regionserver.HRegionServer(1031): Exiting; stopping=b4ac66777750,43009,1733149029645; zookeeper connection closed. 2024-12-02T14:17:48,570 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@53ecd89a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@53ecd89a 2024-12-02T14:17:48,570 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-02T14:17:48,887 INFO [M:0;b4ac66777750:42687 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:17:48,887 INFO [M:0;b4ac66777750:42687 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:17:48,887 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:17:48,887 INFO [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:48,887 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:48,887 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:17:48,887 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T14:17:48,888 INFO [M:0;b4ac66777750:42687 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.36 KB heapSize=83.73 KB 2024-12-02T14:17:48,904 DEBUG [M:0;b4ac66777750:42687 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c3d181e07ee401e9fe6eadac57eea76 is 82, key is hbase:meta,,1/info:regioninfo/1733149031905/Put/seqid=0 2024-12-02T14:17:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741942_1122 (size=5672) 2024-12-02T14:17:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741942_1122 (size=5672) 2024-12-02T14:17:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741942_1122 (size=5672) 2024-12-02T14:17:48,910 INFO [M:0;b4ac66777750:42687 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c3d181e07ee401e9fe6eadac57eea76 2024-12-02T14:17:48,930 DEBUG [M:0;b4ac66777750:42687 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/055dee5005a4499c8bbb66dc0d4c1021 is 1077, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733149045039/Put/seqid=0 2024-12-02T14:17:48,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741943_1123 (size=7756) 2024-12-02T14:17:48,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741943_1123 (size=7756) 2024-12-02T14:17:48,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741943_1123 (size=7756) 2024-12-02T14:17:48,941 INFO [M:0;b4ac66777750:42687 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.62 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/055dee5005a4499c8bbb66dc0d4c1021 2024-12-02T14:17:48,946 INFO [M:0;b4ac66777750:42687 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 055dee5005a4499c8bbb66dc0d4c1021 2024-12-02T14:17:48,968 DEBUG [M:0;b4ac66777750:42687 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edb54059fb7d42709fee87a731074f78 is 69, key is b4ac66777750,40955,1733149029496/rs:state/1733149031118/Put/seqid=0 2024-12-02T14:17:48,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741944_1124 (size=5445) 2024-12-02T14:17:48,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44963 is added to blk_1073741944_1124 (size=5445) 2024-12-02T14:17:48,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741944_1124 (size=5445) 2024-12-02T14:17:48,974 INFO [M:0;b4ac66777750:42687 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edb54059fb7d42709fee87a731074f78 2024-12-02T14:17:48,979 INFO [M:0;b4ac66777750:42687 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for edb54059fb7d42709fee87a731074f78 2024-12-02T14:17:48,980 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c3d181e07ee401e9fe6eadac57eea76 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c3d181e07ee401e9fe6eadac57eea76 2024-12-02T14:17:48,986 INFO [M:0;b4ac66777750:42687 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c3d181e07ee401e9fe6eadac57eea76, entries=8, sequenceid=168, filesize=5.5 K 2024-12-02T14:17:48,987 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/055dee5005a4499c8bbb66dc0d4c1021 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/055dee5005a4499c8bbb66dc0d4c1021 2024-12-02T14:17:48,991 INFO [M:0;b4ac66777750:42687 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 055dee5005a4499c8bbb66dc0d4c1021 2024-12-02T14:17:48,992 INFO [M:0;b4ac66777750:42687 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/055dee5005a4499c8bbb66dc0d4c1021, entries=17, sequenceid=168, filesize=7.6 K 2024-12-02T14:17:48,992 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edb54059fb7d42709fee87a731074f78 as hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edb54059fb7d42709fee87a731074f78 2024-12-02T14:17:48,996 INFO [M:0;b4ac66777750:42687 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for edb54059fb7d42709fee87a731074f78 2024-12-02T14:17:48,996 INFO [M:0;b4ac66777750:42687 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42525/user/jenkins/test-data/0da42be8-3202-31cc-b8bb-5d84c381f016/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edb54059fb7d42709fee87a731074f78, entries=3, sequenceid=168, filesize=5.3 K 2024-12-02T14:17:48,997 INFO [M:0;b4ac66777750:42687 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.36 KB/69996, heapSize ~83.44 KB/85440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=168, compaction requested=false 2024-12-02T14:17:48,999 INFO [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:17:48,999 DEBUG [M:0;b4ac66777750:42687 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733149068887Disabling compacts and flushes for region at 1733149068887Disabling writes for close at 1733149068887Obtaining lock to block concurrent updates at 1733149068888 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733149068888Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69996, getHeapSize=85680, getOffHeapSize=0, getCellsCount=195 at 1733149068888Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733149068889 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733149068889Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733149068903 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733149068903Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733149068915 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733149068929 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733149068929Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733149068946 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733149068967 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733149068967Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72fa4fc0: reopening flushed file at 1733149068979 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74a49c5: reopening flushed file at 1733149068986 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469fa316: reopening flushed file at 1733149068992 (+6 ms)Finished flush of dataSize ~68.36 KB/69996, heapSize ~83.44 KB/85440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=168, compaction requested=false at 1733149068997 (+5 ms)Writing region close event to WAL at 1733149068999 (+2 ms)Closed at 1733149068999 2024-12-02T14:17:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39417 is added to blk_1073741830_1006 (size=56498) 2024-12-02T14:17:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741830_1006 (size=56498) 2024-12-02T14:17:49,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44963 is added to blk_1073741830_1006 (size=56498) 2024-12-02T14:17:49,003 INFO [M:0;b4ac66777750:42687 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T14:17:49,003 INFO [M:0;b4ac66777750:42687 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42687 2024-12-02T14:17:49,003 INFO [M:0;b4ac66777750:42687 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:17:49,003 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:17:49,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:49,106 INFO [M:0;b4ac66777750:42687 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:17:49,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42687-0x1009b59793c0000, quorum=127.0.0.1:56104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:17:49,112 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048400 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048400 (inode 16655) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733149048066/wal.1733149048400 (inode 16655) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-12-02T14:17:49,113 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733149040089/wal.1733149040306 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:49,113 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733149063231/wal.1733149063470 with renewLeaseKey: DEFAULT_16767 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:49,116 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149057830 with renewLeaseKey: DEFAULT_16678 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149057830 (inode 16678) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733149048579/wal.1733149057830 (inode 16678) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-12-02T14:17:49,116 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733149033048/wal.1733149033121 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:49,118 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058620 with renewLeaseKey: DEFAULT_16704 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058620 (inode 16704) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733149058043/wal.1733149058620 (inode 16704) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 25 more 2024-12-02T14:17:49,119 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733149058779/wal.1733149058876 with renewLeaseKey: DEFAULT_16726 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:17:49,121 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal.1733149032907 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal.1733149032907 (inode 16485) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733149032712/wal.1733149032907 (inode 16485) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files. 
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	... 25 more
2024-12-02T14:17:49,123 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal.1733149032464 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal.1733149032464 (inode 16462) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733149032217/wal.1733149032464 (inode 16462) Holder DFSClient_NONMAPREDUCE_-2060961908_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	... 25 more
2024-12-02T14:17:49,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f1150e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:17:49,131 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T14:17:49,131 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T14:17:49,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T14:17:49,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,STOPPED}
2024-12-02T14:17:49,135 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T14:17:49,135 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1615503913-172.17.0.3-1733149025365 (Datanode Uuid e58674ab-0b59-436b-aa48-22e8c7f97388) service to localhost/127.0.0.1:42525
2024-12-02T14:17:49,135 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T14:17:49,135 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T14:17:49,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data5/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,137 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data6/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,137 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-02T14:17:49,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd427b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:17:49,141 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T14:17:49,141 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T14:17:49,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T14:17:49,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,STOPPED}
2024-12-02T14:17:49,142 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T14:17:49,142 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T14:17:49,142 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1615503913-172.17.0.3-1733149025365 (Datanode Uuid 68ba86ae-a0c1-47d2-a0fe-7c5ec0e16877) service to localhost/127.0.0.1:42525
2024-12-02T14:17:49,142 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T14:17:49,143 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data3/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,143 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data4/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,143 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-02T14:17:49,146 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330740de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:17:49,146 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T14:17:49,146 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T14:17:49,146 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T14:17:49,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,STOPPED}
2024-12-02T14:17:49,148 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T14:17:49,148 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T14:17:49,148 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T14:17:49,148 WARN [BP-1615503913-172.17.0.3-1733149025365 heartbeating to localhost/127.0.0.1:42525 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1615503913-172.17.0.3-1733149025365 (Datanode Uuid 6dedecca-12aa-42e5-808f-23a2471ac0d0) service to localhost/127.0.0.1:42525
2024-12-02T14:17:49,148 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data1/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,149 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/cluster_5a7bd798-2ced-509c-6679-775e1b9afb5a/data/data2/current/BP-1615503913-172.17.0.3-1733149025365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T14:17:49,149 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-02T14:17:49,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-02T14:17:49,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T14:17:49,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T14:17:49,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T14:17:49,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-server/target/test-data/d1029182-bde1-f6e5-265f-b2f9301d5375/hadoop.log.dir/,STOPPED}
2024-12-02T14:17:49,165 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-02T14:17:49,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down